Use of org.apache.solr.schema.IndexSchema in project lucene-solr by apache.
The class DefaultSolrHighlighter, method doHighlighting.
/**
* Generates a list of highlighted query fragments for each item in a list
* of documents, or returns null if highlighting is disabled.
*
* @param docs query results
* @param query the query
* @param req the current request
* @param defaultFields default list of fields to summarize
*
* @return NamedList containing a NamedList for each document, which in
* turn contains a set of (field, summary) pairs.
*/
@Override
@SuppressWarnings("unchecked")
public NamedList<Object> doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields) throws IOException {
SolrParams params = req.getParams();
// also returns early if no unique key field
if (!isHighlightingEnabled(params))
return null;
boolean rewrite = query != null && !(Boolean.valueOf(params.get(HighlightParams.USE_PHRASE_HIGHLIGHTER, "true")) && Boolean.valueOf(params.get(HighlightParams.HIGHLIGHT_MULTI_TERM, "true")));
if (rewrite) {
query = query.rewrite(req.getSearcher().getIndexReader());
}
SolrIndexSearcher searcher = req.getSearcher();
IndexSchema schema = searcher.getSchema();
// fetch unique key if one exists.
SchemaField keyField = schema.getUniqueKeyField();
if (keyField == null) {
//exit early; we need a unique key field to populate the response
return null;
}
String[] fieldNames = getHighlightFields(query, req, defaultFields);
Set<String> preFetchFieldNames = getDocPrefetchFieldNames(fieldNames, req);
if (preFetchFieldNames != null) {
preFetchFieldNames.add(keyField.getName());
}
// Lazy container for fvh and fieldQuery
FvhContainer fvhContainer = new FvhContainer(null, null);
// SOLR-5855
IndexReader reader = new TermVectorReusingLeafReader(req.getSearcher().getSlowAtomicReader());
// Highlight each document
NamedList fragments = new SimpleOrderedMap();
DocIterator iterator = docs.iterator();
for (int i = 0; i < docs.size(); i++) {
int docId = iterator.nextDoc();
Document doc = searcher.doc(docId, preFetchFieldNames);
@SuppressWarnings("rawtypes") NamedList docHighlights = new SimpleOrderedMap();
// Highlight per-field
for (String fieldName : fieldNames) {
SchemaField schemaField = schema.getFieldOrNull(fieldName);
// object type allows flexibility for subclassers
Object fieldHighlights;
fieldHighlights = doHighlightingOfField(doc, docId, schemaField, fvhContainer, query, reader, req, params);
if (fieldHighlights == null) {
fieldHighlights = alternateField(doc, docId, fieldName, fvhContainer, query, reader, req);
}
if (fieldHighlights != null) {
docHighlights.add(fieldName, fieldHighlights);
}
} // for each field
fragments.add(schema.printableUniqueKey(doc), docHighlights);
} // for each doc
return fragments;
}
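For orientation, here is a minimal sketch of walking the structure doHighlighting returns; the dumpHighlights helper is an illustrative addition, not part of the Solr source above.
import java.util.Arrays;
import java.util.Map;
import org.apache.solr.common.util.NamedList;

// Hypothetical consumer: the outer NamedList holds one entry per document, keyed
// by its printable unique key; each value is itself a NamedList of
// (field name, snippets) pairs, where snippets is a String[] by default.
@SuppressWarnings("unchecked")
static void dumpHighlights(NamedList<Object> highlighting) {
  if (highlighting == null) {
    return; // highlighting disabled, or the schema has no unique key field
  }
  for (Map.Entry<String, Object> perDoc : highlighting) {
    NamedList<Object> perField = (NamedList<Object>) perDoc.getValue();
    for (Map.Entry<String, Object> field : perField) {
      Object snippets = field.getValue();
      String shown = snippets instanceof String[] ? Arrays.toString((String[]) snippets) : String.valueOf(snippets);
      System.out.println(perDoc.getKey() + " / " + field.getKey() + " -> " + shown);
    }
  }
}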
Use of org.apache.solr.schema.IndexSchema in project lucene-solr by apache.
The class DefaultSolrHighlighter, method alternateField.
/** Returns the alternate highlight object for this field -- a String[] by default. Null if none. */
@SuppressWarnings("unchecked")
protected Object alternateField(Document doc, int docId, String fieldName, FvhContainer fvhContainer, Query query, IndexReader reader, SolrQueryRequest req) throws IOException {
IndexSchema schema = req.getSearcher().getSchema();
SolrParams params = req.getParams();
String alternateField = params.getFieldParam(fieldName, HighlightParams.ALTERNATE_FIELD);
int alternateFieldLen = params.getFieldInt(fieldName, HighlightParams.ALTERNATE_FIELD_LENGTH, 0);
if (alternateField == null || alternateField.length() == 0) {
return null;
}
if (params.getFieldBool(fieldName, HighlightParams.HIGHLIGHT_ALTERNATE, true) && !alternateField.equals(fieldName)) {
// Try to highlight alternate field
Object fieldHighlights = null;
SchemaField schemaField = schema.getFieldOrNull(alternateField);
if (schemaField != null) {
HashMap<String, String> invariants = new HashMap<>();
invariants.put("f." + alternateField + "." + HighlightParams.SNIPPETS, "1");
// Enforce maxAlternateFieldLength by FRAGSIZE. Minimum 18 due to FVH limitations
invariants.put("f." + alternateField + "." + HighlightParams.FRAGSIZE, alternateFieldLen > 0 ? String.valueOf(Math.max(18, alternateFieldLen)) : String.valueOf(Integer.MAX_VALUE));
SolrParams origParams = req.getParams();
req.setParams(SolrParams.wrapDefaults(new MapSolrParams(invariants), origParams));
fieldHighlights = doHighlightingOfField(doc, docId, schemaField, fvhContainer, query, reader, req, params);
req.setParams(origParams);
if (fieldHighlights != null) {
return fieldHighlights;
}
}
}
// Fallback to static non-highlighted
IndexableField[] docFields = doc.getFields(alternateField);
if (docFields.length == 0) {
// The alternate field did not exist, treat the original field as fallback instead
docFields = doc.getFields(fieldName);
}
List<String> listFields = new ArrayList<>();
for (IndexableField field : docFields) {
if (field.binaryValue() == null)
listFields.add(field.stringValue());
}
if (listFields.isEmpty()) {
return null;
}
String[] altTexts = listFields.toArray(new String[listFields.size()]);
Encoder encoder = getEncoder(fieldName, params);
List<String> altList = new ArrayList<>();
int len = 0;
for (String altText : altTexts) {
if (alternateFieldLen <= 0) {
altList.add(encoder.encodeText(altText));
} else {
altList.add(len + altText.length() > alternateFieldLen ? encoder.encodeText(altText.substring(0, alternateFieldLen - len)) : encoder.encodeText(altText));
len += altText.length();
if (len >= alternateFieldLen)
break;
}
}
return altList;
}
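For reference, a sketch of the per-field request parameters this method reads; the field names "body" and "summary" and the length of 100 are made-up examples.
import org.apache.solr.common.params.HighlightParams;
import org.apache.solr.common.params.ModifiableSolrParams;

// Hypothetical setup: if highlighting "body" yields nothing, fall back to the
// "summary" field, highlight it too, and cap the static fallback at ~100 chars.
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(HighlightParams.HIGHLIGHT, "true");                         // hl=true
params.set(HighlightParams.FIELDS, "body");                            // hl.fl=body
params.set("f.body." + HighlightParams.ALTERNATE_FIELD, "summary");    // f.body.hl.alternateField
params.set("f.body." + HighlightParams.ALTERNATE_FIELD_LENGTH, "100"); // f.body.hl.maxAlternateFieldLength
params.set("f.body." + HighlightParams.HIGHLIGHT_ALTERNATE, "true");   // f.body.hl.highlightAlternate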
Use of org.apache.solr.schema.IndexSchema in project lucene-solr by apache.
The class TestGroupingSearch, method testRandomGrouping.
@Test
public void testRandomGrouping() throws Exception {
/**
updateJ("{\"add\":{\"doc\":{\"id\":\"77\"}}}", params("commit","true"));
assertJQ(req("q","id:77"), "/response/numFound==1");
Doc doc = createDocObj(types);
updateJ(toJSON(doc), params("commit","true"));
assertJQ(req("q","id:"+doc.id), "/response/numFound==1");
**/
int indexIter = atLeast(10); // make >0 to enable test
int queryIter = atLeast(50);
while (--indexIter >= 0) {
int indexSize = random().nextInt(25 * RANDOM_MULTIPLIER);
//indexSize=2;
List<FldType> types = new ArrayList<>();
types.add(new FldType("id", ONE_ONE, new SVal('A', 'Z', 4, 4)));
// field used to score
types.add(new FldType("score_f", ONE_ONE, new FVal(1, 100)));
types.add(new FldType("foo_i", ZERO_ONE, new IRange(0, indexSize)));
types.add(new FldType(FOO_STRING_FIELD, ONE_ONE, new SVal('a', 'z', 1, 2)));
types.add(new FldType(SMALL_STRING_FIELD, ZERO_ONE, new SVal('a', (char) ('c' + indexSize / 10), 1, 1)));
types.add(new FldType(SMALL_INT_FIELD, ZERO_ONE, new IRange(0, 5 + indexSize / 10)));
clearIndex();
Map<Comparable, Doc> model = indexDocs(types, null, indexSize);
// test with specific docs
if (false) {
clearIndex();
model.clear();
Doc d1 = createDoc(types);
d1.getValues(SMALL_STRING_FIELD).set(0, "c");
d1.getValues(SMALL_INT_FIELD).set(0, 5);
d1.order = 0;
updateJ(toJSON(d1), params("commit", "true"));
model.put(d1.id, d1);
d1 = createDoc(types);
d1.getValues(SMALL_STRING_FIELD).set(0, "b");
d1.getValues(SMALL_INT_FIELD).set(0, 5);
d1.order = 1;
updateJ(toJSON(d1), params("commit", "false"));
model.put(d1.id, d1);
d1 = createDoc(types);
d1.getValues(SMALL_STRING_FIELD).set(0, "c");
d1.getValues(SMALL_INT_FIELD).set(0, 5);
d1.order = 2;
updateJ(toJSON(d1), params("commit", "false"));
model.put(d1.id, d1);
d1 = createDoc(types);
d1.getValues(SMALL_STRING_FIELD).set(0, "c");
d1.getValues(SMALL_INT_FIELD).set(0, 5);
d1.order = 3;
updateJ(toJSON(d1), params("commit", "false"));
model.put(d1.id, d1);
d1 = createDoc(types);
d1.getValues(SMALL_STRING_FIELD).set(0, "b");
d1.getValues(SMALL_INT_FIELD).set(0, 2);
d1.order = 4;
updateJ(toJSON(d1), params("commit", "true"));
model.put(d1.id, d1);
}
for (int qiter = 0; qiter < queryIter; qiter++) {
String groupField = types.get(random().nextInt(types.size())).fname;
int rows = random().nextInt(10) == 0 ? random().nextInt(model.size() + 2) : random().nextInt(11) - 1;
// pick a small start normally for better coverage
int start = random().nextInt(5) == 0 ? random().nextInt(model.size() + 2) : random().nextInt(5);
int group_limit = random().nextInt(10) == 0 ? random().nextInt(model.size() + 2) : random().nextInt(11) - 1;
// pick a small start normally for better coverage
int group_offset = random().nextInt(10) == 0 ? random().nextInt(model.size() + 2) : random().nextInt(2);
IndexSchema schema = h.getCore().getLatestSchema();
String[] stringSortA = new String[1];
Comparator<Doc> sortComparator = createSort(schema, types, stringSortA);
String sortStr = stringSortA[0];
Comparator<Doc> groupComparator = random().nextBoolean() ? sortComparator : createSort(schema, types, stringSortA);
String groupSortStr = stringSortA[0];
// if groupSortStr is null but sortStr != null, default the group sort to "score desc"
if (groupSortStr == null && groupSortStr != sortStr) {
groupSortStr = "score desc";
}
// Test specific case
if (false) {
groupField = SMALL_INT_FIELD;
sortComparator = createComparator(Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, true)));
sortStr = SMALL_STRING_FIELD + " asc";
groupComparator = createComparator(Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, false)));
groupSortStr = SMALL_STRING_FIELD + " asc";
rows = 1;
start = 0;
group_offset = 1;
group_limit = 1;
}
Map<Comparable, Grp> groups = groupBy(model.values(), groupField);
// first sort the docs in each group
for (Grp grp : groups.values()) {
Collections.sort(grp.docs, groupComparator);
}
// if sort != group.sort, we need to find the max doc by "sort"
if (groupComparator != sortComparator) {
for (Grp grp : groups.values()) grp.setMaxDoc(sortComparator);
}
List<Grp> sortedGroups = new ArrayList<>(groups.values());
Collections.sort(sortedGroups, groupComparator == sortComparator ? createFirstDocComparator(sortComparator) : createMaxDocComparator(sortComparator));
boolean includeNGroups = random().nextBoolean();
Object modelResponse = buildGroupedResult(schema, sortedGroups, start, rows, group_offset, group_limit, includeNGroups);
boolean truncateGroups = random().nextBoolean();
Map<String, Integer> facetCounts = new TreeMap<>();
if (truncateGroups) {
for (Grp grp : sortedGroups) {
Doc doc = grp.docs.get(0);
if (doc.getValues(FOO_STRING_FIELD) == null) {
continue;
}
String key = doc.getFirstValue(FOO_STRING_FIELD).toString();
boolean exists = facetCounts.containsKey(key);
int count = exists ? facetCounts.get(key) : 0;
facetCounts.put(key, ++count);
}
} else {
for (Doc doc : model.values()) {
if (doc.getValues(FOO_STRING_FIELD) == null) {
continue;
}
for (Comparable field : doc.getValues(FOO_STRING_FIELD)) {
String key = field.toString();
boolean exists = facetCounts.containsKey(key);
int count = exists ? facetCounts.get(key) : 0;
facetCounts.put(key, ++count);
}
}
}
List<Comparable> expectedFacetResponse = new ArrayList<>();
for (Map.Entry<String, Integer> stringIntegerEntry : facetCounts.entrySet()) {
expectedFacetResponse.add(stringIntegerEntry.getKey());
expectedFacetResponse.add(stringIntegerEntry.getValue());
}
int randomPercentage = random().nextInt(101);
// TODO: create a random filter too
SolrQueryRequest req = req("group", "true", "wt", "json", "indent", "true", "echoParams", "all",
    "q", "{!func}score_f", "group.field", groupField,
    sortStr == null ? "nosort" : "sort", sortStr == null ? "" : sortStr,
    (groupSortStr == null || groupSortStr == sortStr) ? "noGroupsort" : "group.sort", groupSortStr == null ? "" : groupSortStr,
    "rows", "" + rows, "start", "" + start, "group.offset", "" + group_offset, "group.limit", "" + group_limit,
    GroupParams.GROUP_CACHE_PERCENTAGE, Integer.toString(randomPercentage),
    GroupParams.GROUP_TOTAL_COUNT, includeNGroups ? "true" : "false",
    "facet", "true", "facet.sort", "index", "facet.limit", "-1", "facet.field", FOO_STRING_FIELD,
    GroupParams.GROUP_TRUNCATE, truncateGroups ? "true" : "false",
    // facet.mincount=1 and facet.method=fcs to avoid FC insanity
    "facet.mincount", "1", "facet.method", "fcs");
String strResponse = h.query(req);
Object realResponse = ObjectBuilder.fromJSON(strResponse);
String err = JSONTestUtil.matchObj("/grouped/" + groupField, realResponse, modelResponse);
if (err != null) {
log.error("GROUPING MISMATCH (" + queryIter + "): " + err + "\n\trequest=" + req + "\n\tresult=" + strResponse + "\n\texpected=" + JSONUtil.toJSON(modelResponse) + "\n\tsorted_model=" + sortedGroups);
// re-execute the request... good for putting a breakpoint here for debugging
String rsp = h.query(req);
fail(err);
}
// assert post / pre grouping facets
err = JSONTestUtil.matchObj("/facet_counts/facet_fields/" + FOO_STRING_FIELD, realResponse, expectedFacetResponse);
if (err != null) {
log.error("GROUPING MISMATCH (" + queryIter + "): " + err + "\n\trequest=" + req + "\n\tresult=" + strResponse + "\n\texpected=" + JSONUtil.toJSON(expectedFacetResponse));
// re-execute the request... good for putting a breakpoint here for debugging
h.query(req);
fail(err);
}
} // end query iter
} // end index iter
}
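As an aside, the groupBy(model.values(), groupField) call above comes from the test's base class; here is a minimal generic sketch of the idea, with a Function-based accessor standing in for the test's Doc type (illustration only, not the project's implementation).
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

// Bucket documents by one field's value, preserving encounter order, so that
// each bucket can then be sorted with the group comparator.
static <D> Map<Object, List<D>> groupByField(Collection<D> docs, Function<D, Object> field) {
  Map<Object, List<D>> groups = new LinkedHashMap<>();
  for (D doc : docs) {
    groups.computeIfAbsent(field.apply(doc), k -> new ArrayList<>()).add(doc);
  }
  return groups;
}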
Use of org.apache.solr.schema.IndexSchema in project lucene-solr by apache.
The class TestLuceneMatchVersion, method testStandardTokenizerVersions.
public void testStandardTokenizerVersions() throws Exception {
assertEquals(DEFAULT_VERSION, solrConfig.luceneMatchVersion);
final IndexSchema schema = h.getCore().getLatestSchema();
FieldType type = schema.getFieldType("textDefault");
TokenizerChain ana = (TokenizerChain) type.getIndexAnalyzer();
assertEquals(DEFAULT_VERSION, (ana.getTokenizerFactory()).getLuceneMatchVersion());
assertEquals(DEFAULT_VERSION, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
type = schema.getFieldType("textTurkishAnalyzerDefault");
Analyzer ana1 = type.getIndexAnalyzer();
assertTrue(ana1 instanceof TurkishAnalyzer);
assertEquals(DEFAULT_VERSION, ana1.getVersion());
}
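The getVersion() assertions rest on Lucene's analyzer version plumbing; a minimal standalone sketch of that contract follows, with Version.LATEST as a stand-in for whatever luceneMatchVersion resolves to.
import org.apache.lucene.analysis.tr.TurkishAnalyzer;
import org.apache.lucene.util.Version;

// Solr configures each schema analyzer with solrconfig.xml's luceneMatchVersion;
// the analyzer then reports that same version via getVersion().
TurkishAnalyzer analyzer = new TurkishAnalyzer();
analyzer.setVersion(Version.LATEST);
assert analyzer.getVersion() == Version.LATEST;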
Use of org.apache.solr.schema.IndexSchema in project lucene-solr by apache.
The class QueryComponent, method process.
/**
* Actually run the query
*/
@Override
public void process(ResponseBuilder rb) throws IOException {
LOG.debug("process: {}", rb.req.getParams());
SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
SolrIndexSearcher searcher = req.getSearcher();
StatsCache statsCache = req.getCore().getStatsCache();
int purpose = params.getInt(ShardParams.SHARDS_PURPOSE, ShardRequest.PURPOSE_GET_TOP_IDS);
if ((purpose & ShardRequest.PURPOSE_GET_TERM_STATS) != 0) {
statsCache.returnLocalStats(rb, searcher);
return;
}
// check if we need to update the local copy of global dfs
if ((purpose & ShardRequest.PURPOSE_SET_TERM_STATS) != 0) {
// retrieve from request and update local cache
statsCache.receiveGlobalStats(req);
}
SolrQueryResponse rsp = rb.rsp;
IndexSchema schema = searcher.getSchema();
// Optional: This could also be implemented by the top-level searcher sending
// a filter that lists the ids... that would be transparent to
// the request handler, but would be more expensive (and would preserve score
// too if desired).
String ids = params.get(ShardParams.IDS);
if (ids != null) {
SchemaField idField = schema.getUniqueKeyField();
List<String> idArr = StrUtils.splitSmart(ids, ",", true);
int[] luceneIds = new int[idArr.size()];
int docs = 0;
if (idField.getType().isPointField()) {
for (int i = 0; i < idArr.size(); i++) {
int id = searcher.search(idField.getType().getFieldQuery(null, idField, idArr.get(i)), 1).scoreDocs[0].doc;
if (id >= 0) {
luceneIds[docs++] = id;
}
}
} else {
for (int i = 0; i < idArr.size(); i++) {
int id = searcher.getFirstMatch(new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
if (id >= 0)
luceneIds[docs++] = id;
}
}
DocListAndSet res = new DocListAndSet();
res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
if (rb.isNeedDocSet()) {
// TODO: create a cache for this!
List<Query> queries = new ArrayList<>();
queries.add(rb.getQuery());
List<Query> filters = rb.getFilters();
if (filters != null)
queries.addAll(filters);
res.docSet = searcher.getDocSet(queries);
}
rb.setResults(res);
ResultContext ctx = new BasicResultContext(rb);
rsp.addResponse(ctx);
return;
}
// -1 as flag if not set.
long timeAllowed = params.getLong(CommonParams.TIME_ALLOWED, -1L);
if (null != rb.getCursorMark() && 0 < timeAllowed) {
// fundamentally incompatible
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not search using both " + CursorMarkParams.CURSOR_MARK_PARAM + " and " + CommonParams.TIME_ALLOWED);
}
QueryCommand cmd = rb.getQueryCommand();
cmd.setTimeAllowed(timeAllowed);
req.getContext().put(SolrIndexSearcher.STATS_SOURCE, statsCache.get(req));
QueryResult result = new QueryResult();
cmd.setSegmentTerminateEarly(params.getBool(CommonParams.SEGMENT_TERMINATE_EARLY, CommonParams.SEGMENT_TERMINATE_EARLY_DEFAULT));
if (cmd.getSegmentTerminateEarly()) {
result.setSegmentTerminatedEarly(Boolean.FALSE);
}
//
// grouping / field collapsing
//
GroupingSpecification groupingSpec = rb.getGroupingSpec();
if (groupingSpec != null) {
// not supported, silently ignore any segmentTerminateEarly flag
cmd.setSegmentTerminateEarly(false);
try {
boolean needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
if (params.getBool(GroupParams.GROUP_DISTRIBUTED_FIRST, false)) {
// Order matters here
CommandHandler.Builder topsGroupsActionBuilder = new CommandHandler.Builder().setQueryCommand(cmd).setNeedDocSet(false).setIncludeHitCount(true).setSearcher(searcher);
for (String field : groupingSpec.getFields()) {
topsGroupsActionBuilder.addCommandField(new SearchGroupsFieldCommand.Builder().setField(schema.getField(field)).setGroupSort(groupingSpec.getGroupSort()).setTopNGroups(cmd.getOffset() + cmd.getLen()).setIncludeGroupCount(groupingSpec.isIncludeGroupCount()).build());
}
CommandHandler commandHandler = topsGroupsActionBuilder.build();
commandHandler.execute();
SearchGroupsResultTransformer serializer = new SearchGroupsResultTransformer(searcher);
rsp.add("firstPhase", commandHandler.processResult(result, serializer));
rsp.add("totalHitCount", commandHandler.getTotalHitCount());
rb.setResult(result);
return;
} else if (params.getBool(GroupParams.GROUP_DISTRIBUTED_SECOND, false)) {
CommandHandler.Builder secondPhaseBuilder = new CommandHandler.Builder().setQueryCommand(cmd).setTruncateGroups(groupingSpec.isTruncateGroups() && groupingSpec.getFields().length > 0).setSearcher(searcher);
int docsToCollect = Grouping.getMax(groupingSpec.getWithinGroupOffset(), groupingSpec.getWithinGroupLimit(), searcher.maxDoc());
docsToCollect = Math.max(docsToCollect, 1);
for (String field : groupingSpec.getFields()) {
SchemaField schemaField = schema.getField(field);
String[] topGroupsParam = params.getParams(GroupParams.GROUP_DISTRIBUTED_TOPGROUPS_PREFIX + field);
if (topGroupsParam == null) {
topGroupsParam = new String[0];
}
List<SearchGroup<BytesRef>> topGroups = new ArrayList<>(topGroupsParam.length);
for (String topGroup : topGroupsParam) {
SearchGroup<BytesRef> searchGroup = new SearchGroup<>();
if (!topGroup.equals(TopGroupsShardRequestFactory.GROUP_NULL_VALUE)) {
BytesRefBuilder builder = new BytesRefBuilder();
schemaField.getType().readableToIndexed(topGroup, builder);
searchGroup.groupValue = builder.get();
}
topGroups.add(searchGroup);
}
secondPhaseBuilder.addCommandField(new TopGroupsFieldCommand.Builder().setField(schemaField).setGroupSort(groupingSpec.getGroupSort()).setSortWithinGroup(groupingSpec.getSortWithinGroup()).setFirstPhaseGroups(topGroups).setMaxDocPerGroup(docsToCollect).setNeedScores(needScores).setNeedMaxScore(needScores).build());
}
for (String query : groupingSpec.getQueries()) {
secondPhaseBuilder.addCommandField(new Builder().setDocsToCollect(docsToCollect).setSort(groupingSpec.getGroupSort()).setQuery(query, rb.req).setDocSet(searcher).build());
}
CommandHandler commandHandler = secondPhaseBuilder.build();
commandHandler.execute();
TopGroupsResultTransformer serializer = new TopGroupsResultTransformer(rb);
rsp.add("secondPhase", commandHandler.processResult(result, serializer));
rb.setResult(result);
return;
}
int maxDocsPercentageToCache = params.getInt(GroupParams.GROUP_CACHE_PERCENTAGE, 0);
boolean cacheSecondPassSearch = maxDocsPercentageToCache >= 1 && maxDocsPercentageToCache <= 100;
Grouping.TotalCount defaultTotalCount = groupingSpec.isIncludeGroupCount() ? Grouping.TotalCount.grouped : Grouping.TotalCount.ungrouped;
// this is normally from "rows"
int limitDefault = cmd.getLen();
Grouping grouping = new Grouping(searcher, result, cmd, cacheSecondPassSearch, maxDocsPercentageToCache, groupingSpec.isMain());
grouping.setGroupSort(groupingSpec.getGroupSort()).setWithinGroupSort(groupingSpec.getSortWithinGroup()).setDefaultFormat(groupingSpec.getResponseFormat()).setLimitDefault(limitDefault).setDefaultTotalCount(defaultTotalCount).setDocsPerGroupDefault(groupingSpec.getWithinGroupLimit()).setGroupOffsetDefault(groupingSpec.getWithinGroupOffset()).setGetGroupedDocSet(groupingSpec.isTruncateGroups());
if (groupingSpec.getFields() != null) {
for (String field : groupingSpec.getFields()) {
grouping.addFieldCommand(field, rb.req);
}
}
if (groupingSpec.getFunctions() != null) {
for (String groupByStr : groupingSpec.getFunctions()) {
grouping.addFunctionCommand(groupByStr, rb.req);
}
}
if (groupingSpec.getQueries() != null) {
for (String groupByStr : groupingSpec.getQueries()) {
grouping.addQueryCommand(groupByStr, rb.req);
}
}
if (rb.isNeedDocList() || rb.isDebug()) {
// we need a single list of the returned docs
cmd.setFlags(SolrIndexSearcher.GET_DOCLIST);
}
grouping.execute();
if (grouping.isSignalCacheWarning()) {
rsp.add("cacheWarning", String.format(Locale.ROOT, "Cache limit of %d percent relative to maxdoc has exceeded. Please increase cache size or disable caching.", maxDocsPercentageToCache));
}
rb.setResult(result);
if (grouping.mainResult != null) {
ResultContext ctx = new BasicResultContext(rb, grouping.mainResult);
rsp.addResponse(ctx);
rsp.getToLog().add("hits", grouping.mainResult.matches());
} else if (!grouping.getCommands().isEmpty()) { // Can never be empty since grouping.execute() checks for this.
rsp.add("grouped", result.groupedResults);
rsp.getToLog().add("hits", grouping.getCommands().get(0).getMatches());
}
return;
} catch (SyntaxError e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
}
// normal search result
searcher.search(result, cmd);
rb.setResult(result);
ResultContext ctx = new BasicResultContext(rb);
rsp.addResponse(ctx);
rsp.getToLog().add("hits", rb.getResults().docList.matches());
if (!rb.req.getParams().getBool(ShardParams.IS_SHARD, false)) {
if (null != rb.getNextCursorMark()) {
rb.rsp.add(CursorMarkParams.CURSOR_MARK_NEXT, rb.getNextCursorMark().getSerializedTotem());
}
}
if (rb.mergeFieldHandler != null) {
rb.mergeFieldHandler.handleMergeFields(rb, searcher);
} else {
doFieldSortValues(rb, searcher);
}
doPrefetch(rb);
}
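For context, a hypothetical request that would take the ShardParams.IDS shortcut above instead of running a normal search; the ids are invented for illustration.
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;

// In distributed search the second phase asks each shard only for documents it
// already selected; an "ids" parameter like this triggers the early-return
// branch in process() that fetches those documents directly.
ModifiableSolrParams shardFetch = new ModifiableSolrParams();
shardFetch.set(CommonParams.Q, "*:*");
shardFetch.set(ShardParams.IDS, "doc1,doc2,doc3"); // comma-separated unique keys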