Example use of org.apache.solr.handler.component.ResponseBuilder in the lucene-solr project (Apache): class MergeStrategyTest, method test().
@Test
@ShardsFixed(num = 3)
public void test() throws Exception {
    // Verifies that a custom MergeStrategy (supplied by the {!rank} RankQuery)
    // merges distributed results correctly, and that MergeStrategy.MERGE_COMP
    // orders strategies by ascending cost.
    del("*:*");
    // Spread eleven documents across three shards with distinct sort_i values.
    index_specific(0, "id", "1", "sort_i", "5");
    index_specific(0, "id", "2", "sort_i", "50");
    index_specific(1, "id", "5", "sort_i", "4");
    index_specific(1, "id", "6", "sort_i", "10");
    index_specific(0, "id", "7", "sort_i", "1");
    index_specific(1, "id", "8", "sort_i", "2");
    index_specific(2, "id", "9", "sort_i", "1000");
    index_specific(2, "id", "10", "sort_i", "1500");
    index_specific(2, "id", "11", "sort_i", "1300");
    index_specific(1, "id", "12", "sort_i", "15");
    index_specific(1, "id", "13", "sort_i", "16");
    commit();
    // Response entries that legitimately differ between the control and the
    // distributed run are skipped when the two responses are compared.
    handle.put("explain", SKIPVAL);
    handle.put("timestamp", SKIPVAL);
    handle.put("score", SKIPVAL);
    handle.put("wt", SKIP);
    handle.put("distrib", SKIP);
    handle.put("shards.qt", SKIP);
    handle.put("shards", SKIP);
    handle.put("q", SKIP);
    handle.put("maxScore", SKIPVAL);
    handle.put("_version_", SKIP);
    // Test mergeStrategy that uses score
    query("rq", "{!rank}", "q", "*:*", "rows", "12", "sort", "sort_i asc", "fl", "*,score");
    // Test without mergeStrategy
    query("q", "*:*", "rows", "12", "sort", "sort_i asc");
    // Test mergeStrategy=1 that uses a sort field.
    query("rq", "{!rank mergeStrategy=1}", "q", "*:*", "rows", "12", "sort", "sort_i asc");

    // With the {!rank} strategy the merged order follows the re-ranked score
    // (highest sort_i first), even though the request sorts sort_i asc.
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", "*:*");
    params.add("rows", "12");
    params.add("rq", "{!rank}");
    params.add("sort", "sort_i asc");
    params.add("fl", "*,score");
    setDistributedParams(params);
    QueryResponse rsp = queryServer(params);
    assertOrder(rsp, "10", "11", "9", "2", "13", "12", "6", "1", "5", "8", "7");

    // Without the re-rank query the normal sort_i asc ordering applies.
    params = new ModifiableSolrParams();
    params.add("q", "*:*");
    params.add("rows", "12");
    params.add("sort", "sort_i asc");
    params.add("fl", "*,score");
    setDistributedParams(params);
    rsp = queryServer(params);
    assertOrder(rsp, "7", "8", "5", "1", "6", "12", "13", "2", "9", "11", "10");

    // Three stub strategies that differ only in cost, used to check MERGE_COMP.
    MergeStrategy m1 = new MergeStrategy() {

        @Override
        public void merge(ResponseBuilder rb, ShardRequest sreq) {
        }

        @Override
        public boolean mergesIds() {
            return true;
        }

        @Override
        public boolean handlesMergeFields() {
            return false;
        }

        @Override
        public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) {
        }

        @Override
        public int getCost() {
            return 1;
        }
    };
    MergeStrategy m2 = new MergeStrategy() {

        @Override
        public void merge(ResponseBuilder rb, ShardRequest sreq) {
        }

        @Override
        public boolean mergesIds() {
            return true;
        }

        @Override
        public boolean handlesMergeFields() {
            return false;
        }

        @Override
        public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) {
        }

        @Override
        public int getCost() {
            return 100;
        }
    };
    MergeStrategy m3 = new MergeStrategy() {

        @Override
        public void merge(ResponseBuilder rb, ShardRequest sreq) {
        }

        @Override
        public boolean mergesIds() {
            return false;
        }

        @Override
        public boolean handlesMergeFields() {
            return false;
        }

        @Override
        public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher) {
        }

        @Override
        public int getCost() {
            return 50;
        }
    };
    MergeStrategy[] merges = { m1, m2, m3 };
    Arrays.sort(merges, MergeStrategy.MERGE_COMP);
    // Use JUnit assertions rather than bare `assert`, which is a no-op
    // unless the JVM runs with -ea.
    assertEquals(1, merges[0].getCost());
    assertEquals(50, merges[1].getCost());
    assertEquals(100, merges[2].getCost());
}
Example use of org.apache.solr.handler.component.ResponseBuilder in the lucene-solr project (Apache): class HighlighterTest, method payloadFilteringSpanQuery().
@Test
public void payloadFilteringSpanQuery() throws IOException {
    clearIndex();
    // Index a single doc whose field carries delimited payloads ("word|7 word|2").
    String field = "payloadDelimited";
    assertU(adoc("id", "0", field, "word|7 word|2"));
    assertU(commit());
    // We search at a lower level than typical Solr tests because there's no QParser for payloads.
    // Build a span query that only matches the occurrence carrying payload 7
    // (the four bytes below are the big-endian encoding of the integer 7).
    Query payloadQuery = new SpanPayloadCheckQuery(new SpanTermQuery(new Term(field, "word")),
        Collections.singletonList(new BytesRef(new byte[] { 0, 0, 0, 7 })));
    // Drive the highlight component directly instead of going through a full request.
    final SearchComponent highlightComponent = h.getCore().getSearchComponent("highlight");
    SolrQueryRequest request = req("hl", "true", "hl.fl", field, HighlightParams.USE_PHRASE_HIGHLIGHTER, "true");
    try {
        SolrQueryResponse response = new SolrQueryResponse();
        ResponseBuilder responseBuilder = new ResponseBuilder(request, response, Collections.singletonList(highlightComponent));
        responseBuilder.setHighlightQuery(payloadQuery);
        responseBuilder.setResults(request.getSearcher().getDocListAndSet(payloadQuery, (DocSet) null, null, 0, 1));
        // Run prepare + process, then dig the snippet out of the response tree.
        highlightComponent.prepare(responseBuilder);
        highlightComponent.process(responseBuilder);
        final String[] fragments = (String[]) response.getValues().findRecursive("highlighting", "0", field);
        // Only the payload-7 occurrence should be wrapped in <em> tags.
        assertEquals("<em>word|7</em> word|2", fragments[0]);
    } finally {
        request.close();
    }
}
Example use of org.apache.solr.handler.component.ResponseBuilder in the lucene-solr project (Apache): class SpellCheckCollator, method collate().
/**
 * Builds whole-query collations from the individual spelling suggestions in
 * {@code result}. When collation verification is enabled (maxCollationTries &gt; 0
 * and a QueryComponent is available) each candidate collation is re-run against
 * the index and only candidates that return hits are kept.
 *
 * @param result           per-token spelling suggestions to combine
 * @param originalQuery    the user's original query string, used as the template
 * @param ultimateResponse the response being built; supplies request params,
 *                         components, parser and filters for verification queries
 * @return the accepted collations, possibly empty; never null
 */
public List<SpellCheckCollation> collate(SpellingResult result, String originalQuery, ResponseBuilder ultimateResponse) {
    List<SpellCheckCollation> collations = new ArrayList<>();
    // Locate the QueryComponent so candidate collations can be re-queried.
    QueryComponent queryComponent = null;
    if (ultimateResponse.components != null) {
        for (SearchComponent sc : ultimateResponse.components) {
            if (sc instanceof QueryComponent) {
                queryComponent = (QueryComponent) sc;
                break;
            }
        }
    }
    boolean verifyCandidateWithQuery = true;
    int maxTries = maxCollationTries;
    int maxNumberToIterate = maxTries;
    if (maxTries < 1) {
        // Verification disabled: accept candidates as-is, up to maxCollations.
        maxTries = 1;
        maxNumberToIterate = maxCollations;
        verifyCandidateWithQuery = false;
    }
    if (queryComponent == null && verifyCandidateWithQuery) {
        LOG.info("Could not find an instance of QueryComponent. Disabling collation verification against the index.");
        maxTries = 1;
        verifyCandidateWithQuery = false;
    }
    docCollectionLimit = docCollectionLimit > 0 ? docCollectionLimit : 0;
    // maxDocId lets us detect whether an early-terminated collection scanned
    // the whole index (exact hit count) or must be extrapolated.
    int maxDocId = -1;
    if (verifyCandidateWithQuery && docCollectionLimit > 0) {
        IndexReader reader = ultimateResponse.req.getSearcher().getIndexReader();
        maxDocId = reader.maxDoc();
    }
    int tryNo = 0;
    int collNo = 0;
    PossibilityIterator possibilityIter = new PossibilityIterator(result.getSuggestions(), maxNumberToIterate, maxCollationEvaluations, suggestionsMayOverlap);
    while (tryNo < maxTries && collNo < maxCollations && possibilityIter.hasNext()) {
        PossibilityIterator.RankedSpellPossibility possibility = possibilityIter.next();
        String collationQueryStr = getCollation(originalQuery, possibility.corrections);
        int hits = 0;
        if (verifyCandidateWithQuery) {
            tryNo++;
            SolrParams origParams = ultimateResponse.req.getParams();
            ModifiableSolrParams params = new ModifiableSolrParams(origParams);
            Iterator<String> origParamIterator = origParams.getParameterNamesIterator();
            int pl = SpellingParams.SPELLCHECK_COLLATE_PARAM_OVERRIDE.length();
            // Apply "spellcheck.collateParam.*" overrides; an empty value
            // means "remove this param from the verification request".
            while (origParamIterator.hasNext()) {
                String origParamName = origParamIterator.next();
                if (origParamName.startsWith(SpellingParams.SPELLCHECK_COLLATE_PARAM_OVERRIDE) && origParamName.length() > pl) {
                    String[] val = origParams.getParams(origParamName);
                    if (val.length == 1 && val[0].length() == 0) {
                        params.set(origParamName.substring(pl), (String[]) null);
                    } else {
                        params.set(origParamName.substring(pl), val);
                    }
                }
            }
            params.set(CommonParams.Q, collationQueryStr);
            params.remove(CommonParams.START);
            params.set(CommonParams.ROWS, "" + docCollectionLimit);
            // we don't want any stored fields
            params.set(CommonParams.FL, ID);
            // we'll sort by doc id to ensure no scoring is done.
            params.set(CommonParams.SORT, "_docid_ asc");
            // CursorMark does not like _docid_ sorting, and we don't need it.
            params.remove(CursorMarkParams.CURSOR_MARK_PARAM);
            // If a dismax query, don't add unnecessary clauses for scoring
            params.remove(DisMaxParams.TIE);
            params.remove(DisMaxParams.PF);
            params.remove(DisMaxParams.PF2);
            params.remove(DisMaxParams.PF3);
            params.remove(DisMaxParams.BQ);
            params.remove(DisMaxParams.BF);
            // Collate testing does not support Grouping (see SOLR-2577)
            params.remove(GroupParams.GROUP);
            // Collate testing does not support the Collapse QParser (See SOLR-8807)
            params.remove("expand");
            String[] filters = params.getParams(CommonParams.FQ);
            if (filters != null) {
                // Drop {!collapse} filters; keep everything else.
                List<String> filtersToApply = new ArrayList<>(filters.length);
                for (String fq : filters) {
                    if (!fq.startsWith("{!collapse")) {
                        filtersToApply.add(fq);
                    }
                }
                // Use the constant for consistency with getParams(CommonParams.FQ) above.
                params.set(CommonParams.FQ, filtersToApply.toArray(new String[0]));
            }
            // creating a request here... make sure to close it!
            ResponseBuilder checkResponse = new ResponseBuilder(new LocalSolrQueryRequest(ultimateResponse.req.getCore(), params), new SolrQueryResponse(), Arrays.<SearchComponent>asList(queryComponent));
            checkResponse.setQparser(ultimateResponse.getQparser());
            checkResponse.setFilters(ultimateResponse.getFilters());
            checkResponse.setQueryString(collationQueryStr);
            checkResponse.components = Arrays.<SearchComponent>asList(queryComponent);
            try {
                queryComponent.prepare(checkResponse);
                if (docCollectionLimit > 0) {
                    // Ask the searcher to stop early once enough docs are collected.
                    int f = checkResponse.getFieldFlags();
                    checkResponse.setFieldFlags(f | SolrIndexSearcher.TERMINATE_EARLY);
                }
                queryComponent.process(checkResponse);
                hits = (Integer) checkResponse.rsp.getToLog().get("hits");
            } catch (EarlyTerminatingCollectorException etce) {
                assert (docCollectionLimit > 0);
                assert 0 < etce.getNumberScanned();
                assert 0 < etce.getNumberCollected();
                if (etce.getNumberScanned() == maxDocId) {
                    // The whole index was scanned, so the count is exact.
                    hits = etce.getNumberCollected();
                } else {
                    // Collection stopped early: extrapolate an estimated hit count.
                    hits = (int) (((float) (maxDocId * etce.getNumberCollected())) / (float) etce.getNumberScanned());
                }
            } catch (Exception e) {
                // Best-effort verification: log and treat as zero hits.
                LOG.warn("Exception trying to re-query to check if a spell check possibility would return any hits.", e);
            } finally {
                checkResponse.req.close();
            }
        }
        if (hits > 0 || !verifyCandidateWithQuery) {
            collNo++;
            SpellCheckCollation collation = new SpellCheckCollation();
            collation.setCollationQuery(collationQueryStr);
            collation.setHits(hits);
            // When suggestions may overlap, tie-break equal ranks by position.
            collation.setInternalRank(suggestionsMayOverlap ? ((possibility.rank * 1000) + possibility.index) : possibility.rank);
            NamedList<String> misspellingsAndCorrections = new NamedList<>();
            for (SpellCheckCorrection corr : possibility.corrections) {
                misspellingsAndCorrections.add(corr.getOriginal().toString(), corr.getCorrection());
            }
            collation.setMisspellingsAndCorrections(misspellingsAndCorrections);
            collations.add(collation);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Collation: " + collationQueryStr + (verifyCandidateWithQuery ? (" will return " + hits + " hits.") : ""));
        }
    }
    return collations;
}
Example use of org.apache.solr.handler.component.ResponseBuilder in the lucene-solr project (Apache): class AnalyticsQuery, method getFilterCollector().
public DelegatingCollector getFilterCollector(IndexSearcher searcher) {
    // A ResponseBuilder is only present for a real user request; during
    // autowarming there is no request info, so no analytics are gathered.
    SolrRequestInfo requestInfo = SolrRequestInfo.getRequestInfo();
    ResponseBuilder responseBuilder = (requestInfo == null) ? null : requestInfo.getResponseBuilder();
    if (responseBuilder == null) {
        // Autowarming case: pass documents straight through.
        return new DelegatingCollector();
    }
    return getAnalyticsCollector(responseBuilder, searcher);
}
Example use of org.apache.solr.handler.component.ResponseBuilder in the lucene-solr project (Apache): class FacetProcessor, method handleFilterExclusions().
/**
 * Recomputes this facet's base domain when the request excludes tagged filters
 * (multi-select faceting): rebuilds the doc set from the main query plus all
 * filters whose parsed queries are NOT tagged with one of the exclude tags.
 *
 * @throws IOException if recomputing the doc set fails
 */
private void handleFilterExclusions() throws IOException {
    List<String> excludeTags = freq.domain.excludeTags;
    if (excludeTags == null || excludeTags.isEmpty()) {
        return;
    }
    // TODO: somehow remove responsebuilder dependency
    ResponseBuilder rb = SolrRequestInfo.getRequestInfo().getResponseBuilder();
    // Avoid the raw type; we only read values and check their runtime types below.
    Map<?, ?> tagMap = (Map<?, ?>) rb.req.getContext().get("tags");
    if (tagMap == null) {
        // no filters were tagged
        return;
    }
    // Identity semantics: we want to exclude the exact parsed Query instances,
    // not anything that merely equals() them.
    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    for (String excludeTag : excludeTags) {
        Object olst = tagMap.get(excludeTag);
        // tagMap has entries of List<String,List<QParser>>, but subject to change in the future
        if (!(olst instanceof Collection))
            continue;
        for (Object o : (Collection<?>) olst) {
            if (!(o instanceof QParser))
                continue;
            QParser qp = (QParser) o;
            try {
                excludeSet.put(qp.getQuery(), Boolean.TRUE);
            } catch (SyntaxError syntaxError) {
                // This should not happen since we should only be retrieving a previously parsed query
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
            }
        }
    }
    if (excludeSet.isEmpty())
        return;
    List<Query> qlist = new ArrayList<>();
    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
        qlist.add(rb.getQuery());
    }
    // add the filters
    if (rb.getFilters() != null) {
        for (Query q : rb.getFilters()) {
            if (!excludeSet.containsKey(q)) {
                qlist.add(q);
            }
        }
    }
    // TODO: we lose parent exclusions...
    for (FacetContext curr = fcontext; curr != null; curr = curr.parent) {
        if (curr.filter != null) {
            qlist.add(curr.filter);
        }
    }
    // recompute the base domain
    fcontext.base = fcontext.searcher.getDocSet(qlist);
}
Aggregations