Use of org.apache.solr.response.ResultContext in project SearchServices by Alfresco.
The class AlfrescoReRankQParserPluginTest, method testScale.
@Test
public void testScale() throws Exception {
    assertU(delQ("*:*"));
    assertU(commit());
    String[] doc = { "id", "1", "term_s", "YYYY", "group_s", "group1", "test_ti", "5", "test_tl", "10", "test_tf", "2000" };
    assertU(adoc(doc));
    assertU(commit());
    String[] doc1 = { "id", "2", "term_s", "YYYY", "group_s", "group1", "test_ti", "50", "test_tl", "100", "test_tf", "200" };
    assertU(adoc(doc1));
    String[] doc2 = { "id", "3", "term_s", "YYYY", "test_ti", "5000", "test_tl", "100", "test_tf", "200" };
    assertU(adoc(doc2));
    assertU(commit());
    String[] doc3 = { "id", "4", "term_s", "YYYY", "test_ti", "500", "test_tl", "1000", "test_tf", "2000" };
    assertU(adoc(doc3));
    String[] doc4 = { "id", "5", "term_s", "YYYY", "group_s", "group2", "test_ti", "4", "test_tl", "10", "test_tf", "2000" };
    assertU(adoc(doc4));
    assertU(commit());
    String[] doc5 = { "id", "6", "term_s", "YYYY", "group_s", "group2", "test_ti", "10", "test_tl", "100", "test_tf", "200" };
    assertU(adoc(doc5));
    assertU(commit());
    // Calculate the scales manually
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("rq", "{!alfrescoReRank reRankQuery=$rqq reRankDocs=200 scale=false}");
    params.add("df", "TEXT");
    params.add("q", "term_s:YYYY");
    params.add("rqq", "{!edismax bf=$bff}id:(1 2 4 5 6)");
    params.add("bff", "field(test_ti)");
    params.add("fl", "id,score");
    params.add("start", "0");
    params.add("rows", "6");
    SolrQueryRequest req = req(params);
    SolrQueryResponse res = null;
    try {
        res = h.queryAndResponse(null, req);
    } finally {
        req.close();
    }
    @SuppressWarnings("rawtypes")
    NamedList vals = res.getValues();
    ResultContext resultContext = (ResultContext) vals.get("response");
    DocList docs = resultContext.getDocList();
    DocIterator it = docs.iterator();
    float max = -Float.MAX_VALUE;
    List<Float> scores = new ArrayList<Float>();
    while (it.hasNext()) {
        it.next();
        float score = it.score();
        max = Math.max(score, max);
        scores.add(score);
    }
    float[] scaledScores = new float[scores.size()];
    for (int i = 0; i < scaledScores.length; i++) {
        float score = scores.get(i);
        if (i < 5) {
            // The first 5 docs are hits on the reRanker, so add 1 to the score
            scaledScores[i] = (score / max) + 1;
        } else {
            // The last doc is not a hit on the reRanker
            scaledScores[i] = (score / max);
        }
    }
    // Get the scaled scores from the reRanker
    params = new ModifiableSolrParams();
    params.add("rq", "{!alfrescoReRank reRankQuery=$rqq reRankDocs=200 scale=true}");
    params.add("df", "TEXT");
    params.add("q", "term_s:YYYY");
    params.add("rqq", "{!edismax bf=$bff}id:(1 2 4 5 6)");
    params.add("bff", "field(test_ti)");
    params.add("fl", "id,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = req(params);
    try {
        res = h.queryAndResponse(null, req);
    } finally {
        req.close();
    }
    vals = res.getValues();
    resultContext = (ResultContext) vals.get("response");
    docs = resultContext.getDocList();
    it = docs.iterator();
    int index = 0;
    while (it.hasNext()) {
        it.next();
        float score = it.score();
        float scaledScore = scaledScores[index++];
        assertTrue(score == scaledScore);
    }
}
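The recurring pattern in this test is unwrap and iterate: pull the ResultContext out of the response values under the "response" key, then walk its DocList with a DocIterator. A minimal standalone sketch of that pattern (the ScoreExtractor class and collectScores method names are hypothetical):

import java.util.ArrayList;
import java.util.List;

import org.apache.solr.common.util.NamedList;
import org.apache.solr.response.ResultContext;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;

public final class ScoreExtractor {

    /** Returns the raw scores of the "response" ResultContext in rank order. */
    public static List<Float> collectScores(SolrQueryResponse res) {
        NamedList<?> vals = res.getValues();
        ResultContext resultContext = (ResultContext) vals.get("response");
        DocList docs = resultContext.getDocList();
        List<Float> scores = new ArrayList<>(docs.size());
        DocIterator it = docs.iterator();
        while (it.hasNext()) {
            it.next(); // advance to the next internal doc id
            scores.add(it.score()); // score() is only valid after next()
        }
        return scores;
    }
}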
Use of org.apache.solr.response.ResultContext in project SearchServices by Alfresco.
The class AlfrescoSearchHandler, method handleRequestBody.
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    readJsonIntoContent(req);
    List<SearchComponent> components = getComponents();
    ResponseBuilder rb = new ResponseBuilder(req, rsp, components);
    if (rb.requestInfo != null) {
        rb.requestInfo.setResponseBuilder(rb);
    }
    boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
    rb.setDebug(dbg);
    if (dbg == false) {
        // if it's true, we are doing everything anyway.
        SolrPluginUtils.getDebugInterests(req.getParams().getParams(CommonParams.DEBUG), rb);
    }
    final RTimerTree timer = rb.isDebug() ? req.getRequestTimer() : null;
    // creates a ShardHandler object only if it's needed
    final ShardHandler shardHandler1 = getAndPrepShardHandler(req, rb);
    if (timer == null) {
        // non-debugging prepare phase
        for (SearchComponent c : components) {
            c.prepare(rb);
        }
    } else {
        // debugging prepare phase
        RTimerTree subt = timer.sub("prepare");
        for (SearchComponent c : components) {
            rb.setTimer(subt.sub(c.getName()));
            c.prepare(rb);
            rb.getTimer().stop();
        }
        subt.stop();
    }
    if (!rb.isDistrib) {
        // a normal non-distributed request
        long timeAllowed = req.getParams().getLong(CommonParams.TIME_ALLOWED, -1L);
        if (timeAllowed > 0L) {
            SolrQueryTimeoutImpl.set(timeAllowed);
        }
        try {
            // The semantics of debugging vs not debugging are different enough that
            // it makes sense to have two control loops
            if (!rb.isDebug()) {
                // Process
                for (SearchComponent c : components) {
                    c.process(rb);
                }
            } else {
                // Process
                RTimerTree subt = timer.sub("process");
                for (SearchComponent c : components) {
                    rb.setTimer(subt.sub(c.getName()));
                    c.process(rb);
                    rb.getTimer().stop();
                }
                subt.stop();
                // add the timing info
                if (rb.isDebugTimings()) {
                    rb.addDebugInfo("timing", timer.asNamedList());
                }
            }
        } catch (ExitableDirectoryReader.ExitingReaderException ex) {
            log.warn("Query: " + req.getParamString() + "; " + ex.getMessage());
            SolrDocumentList r = (SolrDocumentList) rb.rsp.getValues().get("response");
            if (r == null)
                r = new SolrDocumentList();
            r.setNumFound(0);
            rb.rsp.add("response", r);
            if (rb.isDebug()) {
                NamedList debug = new NamedList();
                debug.add("explain", new NamedList());
                rb.rsp.add("debug", debug);
            }
            rb.rsp.getResponseHeader().add("partialResults", Boolean.TRUE);
        } finally {
            SolrQueryTimeoutImpl.reset();
        }
        if (req.getParams().getBool("alfresco.getSolrDocumentList", false)) {
            NamedList values = rsp.getValues();
            ResultContext response = (ResultContext) values.get("response");
            SolrDocumentList newResponse = new SolrDocumentList();
            DocList docs = response.getDocList();
            for (DocIterator it = docs.iterator(); it.hasNext(); ) {
                newResponse.add(toSolrDocument(req.getSearcher().doc(it.nextDoc()), req.getSchema()));
            }
            values.add("responseSolrDocumentList", newResponse);
        }
    } else {
        if (rb.outgoing == null) {
            rb.outgoing = new LinkedList<>();
        }
        rb.finished = new ArrayList<>();
        int nextStage = 0;
        do {
            rb.stage = nextStage;
            nextStage = ResponseBuilder.STAGE_DONE;
            // call all components
            for (SearchComponent c : components) {
                // the next stage is the minimum of what all components report
                nextStage = Math.min(nextStage, c.distributedProcess(rb));
            }
            // check the outgoing queue and send requests
            while (rb.outgoing.size() > 0) {
                // submit all current request tasks at once
                while (rb.outgoing.size() > 0) {
                    ShardRequest sreq = rb.outgoing.remove(0);
                    sreq.actualShards = sreq.shards;
                    if (sreq.actualShards == ShardRequest.ALL_SHARDS) {
                        sreq.actualShards = rb.shards;
                    }
                    // presume we'll get a response from each shard we send a request to
                    sreq.responses = new ArrayList<>(sreq.actualShards.length);
                    // TODO: map from shard to address[]
                    for (String shard : sreq.actualShards) {
                        ModifiableSolrParams params = new ModifiableSolrParams(sreq.params);
                        params.remove(ShardParams.SHARDS); // not a top-level request
                        params.set(CommonParams.DISTRIB, "false"); // not a top-level request
                        params.remove("indent");
                        params.remove(CommonParams.HEADER_ECHO_PARAMS);
                        params.set(ShardParams.IS_SHARD, true); // a sub (shard) request
                        params.set(ShardParams.SHARDS_PURPOSE, sreq.purpose);
                        params.set(ShardParams.SHARD_URL, shard); // so the shard knows what was asked
                        if (req.getContext().get(AbstractQParser.ALFRESCO_JSON) != null) {
                            // This will add the Alfresco JSON as a parameter, overwriting the parameter if it already exists.
                            params.set(AbstractQParser.ALFRESCO_JSON, req.getContext().get(AbstractQParser.ALFRESCO_JSON).toString());
                        }
                        if (rb.requestInfo != null) {
                            // we could try and detect when this is needed, but it could be tricky
                            params.set("NOW", Long.toString(rb.requestInfo.getNOW().getTime()));
                        }
                        String shardQt = params.get(ShardParams.SHARDS_QT);
                        if (shardQt != null) {
                            params.set(CommonParams.QT, shardQt);
                        } else {
                            // for distributed queries that don't include shards.qt, use the original
                            // path as the default; note this did not work this way prior to 5.1
                            if (req.getCore().getSolrConfig().luceneMatchVersion.onOrAfter(Version.LUCENE_5_1_0)) {
                                String reqPath = (String) req.getContext().get(PATH);
                                if (!"/select".equals(reqPath)) {
                                    params.set(CommonParams.QT, reqPath);
                                }
                                // else if path is /select, then the qt gets passed thru if set
                            } else {
                                // this is the pre-5.1 behavior, which translates to sending
                                // the shard request to /select
                                params.remove(CommonParams.QT);
                            }
                        }
                        shardHandler1.submit(sreq, shard, params);
                    }
                }
                // now wait for replies, but if anyone puts more requests on
                // the outgoing queue, send them out immediately (by exiting
                // this loop)
                boolean tolerant = rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false);
                while (rb.outgoing.size() == 0) {
                    ShardResponse srsp = tolerant ? shardHandler1.takeCompletedIncludingErrors() : shardHandler1.takeCompletedOrError();
                    if (srsp == null)
                        break; // no more requests to wait for
                    // Was there an exception?
                    if (srsp.getException() != null) {
                        // If things are not tolerant, abort everything and rethrow
                        if (!tolerant) {
                            shardHandler1.cancelAll();
                            if (srsp.getException() instanceof SolrException) {
                                throw (SolrException) srsp.getException();
                            } else {
                                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, srsp.getException());
                            }
                        } else {
                            if (rsp.getResponseHeader().get("partialResults") == null) {
                                rsp.getResponseHeader().add("partialResults", Boolean.TRUE);
                            }
                        }
                    }
                    rb.finished.add(srsp.getShardRequest());
                    // let the components see the responses to the request
                    for (SearchComponent c : components) {
                        c.handleResponses(rb, srsp.getShardRequest());
                    }
                }
            }
            for (SearchComponent c : components) {
                c.finishStage(rb);
            }
            // we are done when the next stage is MAX_VALUE
        } while (nextStage != Integer.MAX_VALUE);
    }
    // still provide shards.info if requested, even for a short-circuited distrib request
    if (!rb.isDistrib && req.getParams().getBool(ShardParams.SHARDS_INFO, false) && rb.shortCircuitedURL != null) {
        NamedList<Object> shardInfo = new SimpleOrderedMap<Object>();
        SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
        if (rsp.getException() != null) {
            Throwable cause = rsp.getException();
            if (cause instanceof SolrServerException) {
                cause = ((SolrServerException) cause).getRootCause();
            } else {
                if (cause.getCause() != null) {
                    cause = cause.getCause();
                }
            }
            nl.add("error", cause.toString());
            StringWriter trace = new StringWriter();
            cause.printStackTrace(new PrintWriter(trace));
            nl.add("trace", trace.toString());
        } else {
            nl.add("numFound", rb.getResults().docList.matches());
            nl.add("maxScore", rb.getResults().docList.maxScore());
        }
        nl.add("shardAddress", rb.shortCircuitedURL);
        nl.add("time", req.getRequestTimer().getTime()); // elapsed time of this request so far
        int pos = rb.shortCircuitedURL.indexOf("://");
        String shardInfoName = pos != -1 ? rb.shortCircuitedURL.substring(pos + 3) : rb.shortCircuitedURL;
        shardInfo.add(shardInfoName, nl);
        rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
    }
}
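The alfresco.getSolrDocumentList branch above materialises the lazy DocList held by the ResultContext into a plain SolrDocumentList. Below is a self-contained sketch of that conversion; the class name ResultContextUtil is hypothetical, and Solr's ResponseWriterUtil helper is assumed here as a stand-in for the handler's private toSolrDocument method:

import java.io.IOException;

import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.ResultContext;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.util.ResponseWriterUtil;

public final class ResultContextUtil {

    /** Copies the documents referenced by a ResultContext's DocList into a SolrDocumentList. */
    public static SolrDocumentList toSolrDocumentList(ResultContext rc, SolrQueryRequest req) throws IOException {
        DocList docs = rc.getDocList();
        SolrDocumentList out = new SolrDocumentList();
        out.setNumFound(docs.matches());
        out.setStart(docs.offset());
        for (DocIterator it = docs.iterator(); it.hasNext(); ) {
            // fetch the stored fields for the internal doc id and convert them to a SolrDocument
            out.add(ResponseWriterUtil.toSolrDocument(req.getSearcher().doc(it.nextDoc()), req.getSchema()));
        }
        return out;
    }
}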
Use of org.apache.solr.response.ResultContext in project lucene-solr by apache.
The class ResponseLogComponent, method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false))
        return;
    SolrIndexSearcher searcher = rb.req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    if (schema.getUniqueKeyField() == null)
        return;
    ResultContext rc = (ResultContext) rb.rsp.getResponse();
    DocList docs = rc.getDocList();
    if (docs.hasScores()) {
        processScores(rb, docs, schema, searcher);
    } else {
        processIds(rb, docs, schema, searcher);
    }
}
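Note that this component reads the ResultContext straight off rb.rsp.getResponse() rather than looking it up by key in the response values. The processScores and processIds helpers are not shown above; a plausible sketch of the score-logging path (the method body and the "responseLog" key are assumptions) might look like:

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;

final class ResponseLogSketch {

    /** Appends "uniqueKey:score" pairs for every hit to the response log. */
    static void logScores(ResponseBuilder rb, DocList docs, IndexSchema schema, SolrIndexSearcher searcher) throws IOException {
        SchemaField uniqueKey = schema.getUniqueKeyField();
        StringBuilder sb = new StringBuilder();
        for (DocIterator it = docs.iterator(); it.hasNext(); ) {
            int docId = it.nextDoc(); // internal Lucene doc id
            float score = it.score(); // valid here because hasScores() was checked by the caller
            Document doc = searcher.doc(docId);
            sb.append(doc.get(uniqueKey.getName())).append(':').append(score).append(',');
        }
        rb.rsp.addToLog("responseLog", sb.toString());
    }
}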
Use of org.apache.solr.response.ResultContext in project lucene-solr by apache.
The class RealTimeGetComponent, method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;
    SolrParams params = req.getParams();
    CloudDescriptor cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
    if (cloudDesc != null) {
        Replica.Type replicaType = cloudDesc.getReplicaType();
        if (replicaType != null) {
            if (replicaType == Replica.Type.PULL) {
                throw new SolrException(ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support these types of requests", cloudDesc.getCoreNodeName(), Replica.Type.PULL));
            }
            // non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
        }
    }
    if (!params.getBool(COMPONENT_NAME, true)) {
        return;
    }
    // This seems rather kludgey, maybe there is a better way to indicate
    // that the replica can support handling version ranges
    String val = params.get("checkCanHandleVersionRanges");
    if (val != null) {
        rb.rsp.add("canHandleVersionRanges", true);
        return;
    }
    val = params.get("getFingerprint");
    if (val != null) {
        processGetFingeprint(rb);
        return;
    }
    val = params.get("getVersions");
    if (val != null) {
        processGetVersions(rb);
        return;
    }
    val = params.get("getUpdates");
    if (val != null) {
        // solrcloud_debug
        if (log.isDebugEnabled()) {
            try {
                RefCounted<SolrIndexSearcher> searchHolder = req.getCore().getNewestSearcher(false);
                SolrIndexSearcher searcher = searchHolder.get();
                try {
                    log.debug(req.getCore().getCoreContainer().getZkController().getNodeName() + " min count to sync to (from most recent searcher view) " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
                } finally {
                    searchHolder.decref();
                }
            } catch (Exception e) {
                log.debug("Error in solrcloud_debug block", e);
            }
        }
        processGetUpdates(rb);
        return;
    }
    val = params.get("getInputDocument");
    if (val != null) {
        processGetInputDocument(rb);
        return;
    }
    final IdsRequsted reqIds = IdsRequsted.parseParams(req);
    if (reqIds.allIds.isEmpty()) {
        return;
    }
    // parse any existing filters
    try {
        String[] fqs = req.getParams().getParams(CommonParams.FQ);
        if (fqs != null && fqs.length != 0) {
            List<Query> filters = rb.getFilters();
            // if filters already exist, make a copy instead of modifying the original
            filters = filters == null ? new ArrayList<Query>(fqs.length) : new ArrayList<>(filters);
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0) {
                    QParser fqp = QParser.getParser(fq, req);
                    filters.add(fqp.getQuery());
                }
            }
            if (!filters.isEmpty()) {
                rb.setFilters(filters);
            }
        }
    } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }
    final SolrCore core = req.getCore();
    SchemaField idField = core.getLatestSchema().getUniqueKeyField();
    FieldType fieldType = idField.getType();
    SolrDocumentList docList = new SolrDocumentList();
    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
    SearcherInfo searcherInfo = new SearcherInfo(core);
    // this is initialized & set on the context *after* any searcher (re-)opening
    ResultContext resultContext = null;
    final DocTransformer transformer = rsp.getReturnFields().getTransformer();
    // true in any situation where we have to use a realtime searcher rather than returning docs
    // directly from the UpdateLog
    final boolean mustUseRealtimeSearcher =
        // if we have filters, we need to check those against the indexed form of the doc
        (rb.getFilters() != null) || ((null != transformer) && transformer.needsSolrIndexSearcher());
    try {
        BytesRefBuilder idBytes = new BytesRefBuilder();
        for (String idStr : reqIds.allIds) {
            fieldType.readableToIndexed(idStr, idBytes);
            if (ulog != null) {
                Object o = ulog.lookup(idBytes.get());
                if (o != null) {
                    // should currently be a List<Oper,Ver,Doc/Id>
                    List entry = (List) o;
                    assert entry.size() >= 3;
                    int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
                    switch (oper) {
                        case UpdateLog.UPDATE_INPLACE: // fall through to ADD
                        case UpdateLog.ADD:
                            if (mustUseRealtimeSearcher) {
                                // close handles to current searchers & result context
                                searcherInfo.clear();
                                resultContext = null;
                                // force open a new realtime searcher
                                ulog.openRealtimeSearcher();
                                // pretend we never found this record and fall through to use the searcher
                                o = null;
                                break;
                            }
                            SolrDocument doc;
                            if (oper == UpdateLog.ADD) {
                                doc = toSolrDoc((SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
                            } else if (oper == UpdateLog.UPDATE_INPLACE) {
                                assert entry.size() == 5;
                                // For the in-place update case, we have obtained the partial document up to this point.
                                // We need to resolve it to a full document to be returned to the user.
                                doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument) entry.get(entry.size() - 1), entry, null);
                                if (doc == null) {
                                    // document has been deleted while the resolve was going on
                                    break;
                                }
                            } else {
                                throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
                            }
                            if (transformer != null) {
                                transformer.transform(doc, -1, 0); // unknown docID
                            }
                            docList.add(doc);
                            break;
                        case UpdateLog.DELETE:
                            break;
                        default:
                            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
                    }
                    if (o != null)
                        continue;
                }
            }
            // didn't find it in the update log, so it should be in the newest searcher opened
            searcherInfo.init();
            // don't bother with the ResultContext yet; we won't need it if the doc doesn't match the filters
            int docid = -1;
            long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
            if (segAndId >= 0) {
                int segid = (int) segAndId;
                LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
                docid = segid + ctx.docBase;
                if (rb.getFilters() != null) {
                    for (Query raw : rb.getFilters()) {
                        Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
                        Scorer scorer = searcherInfo.getSearcher().createWeight(q, false, 1f).scorer(ctx);
                        if (scorer == null || segid != scorer.iterator().advance(segid)) {
                            // filter doesn't match.
                            docid = -1;
                            break;
                        }
                    }
                }
            }
            if (docid < 0)
                continue;
            Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
            SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
            SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
            docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
            if (null != transformer) {
                if (null == resultContext) {
                    // either first pass, or we've re-opened the searcher - either way now we setContext
                    resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
                    transformer.setContext(resultContext);
                }
                transformer.transform(doc, docid, 0);
            }
            docList.add(doc);
        }
    } finally {
        searcherInfo.clear();
    }
    addDocListToResponse(rb, docList);
}
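The RTGResultContext built near the end exists so that a DocTransformer has a context from which to read the (realtime) searcher. A hypothetical transformer sketch showing both halves of that contract, setContext and needsSolrIndexSearcher; the exact abstract signatures vary slightly across Solr versions, and the [maxdoc] transformer is an illustration only:

import java.io.IOException;

import org.apache.solr.common.SolrDocument;
import org.apache.solr.response.ResultContext;
import org.apache.solr.response.transform.DocTransformer;

public class MaxDocTransformer extends DocTransformer {

    private ResultContext context;

    @Override
    public String getName() {
        return "[maxdoc]"; // hypothetical transformer name
    }

    @Override
    public void setContext(ResultContext context) {
        this.context = context; // RealTimeGetComponent hands its RTGResultContext to us here
    }

    @Override
    public boolean needsSolrIndexSearcher() {
        return true; // forces RTG to open a realtime searcher instead of serving raw tlog entries
    }

    @Override
    public void transform(SolrDocument doc, int docid, float score) throws IOException {
        // read the searcher off the context, which is exactly what the RTGResultContext exists for
        doc.setField(getName(), context.getSearcher().maxDoc());
    }
}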
Use of org.apache.solr.response.ResultContext in project lucene-solr by apache.
The class EmbeddedSolrServer, method request.
// TODO-- this implementation sends the response to XML and then parses it.
// It *should* be able to convert the response directly into a named list.
@Override
public NamedList<Object> request(SolrRequest request, String coreName) throws SolrServerException, IOException {
    String path = request.getPath();
    if (path == null || !path.startsWith("/")) {
        path = "/select";
    }
    SolrRequestHandler handler = coreContainer.getRequestHandler(path);
    if (handler != null) {
        try {
            SolrQueryRequest req = _parser.buildRequestFrom(null, request.getParams(), request.getContentStreams());
            SolrQueryResponse resp = new SolrQueryResponse();
            handler.handleRequest(req, resp);
            checkForExceptions(resp);
            return BinaryResponseWriter.getParsedResponse(req, resp);
        } catch (IOException | SolrException iox) {
            throw iox;
        } catch (Exception ex) {
            throw new SolrServerException(ex);
        }
    }
    if (coreName == null)
        coreName = this.coreName;
    // Check for cores action
    SolrQueryRequest req = null;
    try (SolrCore core = coreContainer.getCore(coreName)) {
        if (core == null) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "No such core: " + coreName);
        }
        SolrParams params = request.getParams();
        if (params == null) {
            params = new ModifiableSolrParams();
        }
        // Extract the handler from the path or params
        handler = core.getRequestHandler(path);
        if (handler == null) {
            if ("/select".equals(path) || "/select/".equalsIgnoreCase(path)) {
                String qt = params.get(CommonParams.QT);
                handler = core.getRequestHandler(qt);
                if (handler == null) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + qt);
                }
            }
        }
        if (handler == null) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + path);
        }
        req = _parser.buildRequestFrom(core, params, request.getContentStreams());
        req.getContext().put(PATH, path);
        req.getContext().put("httpMethod", request.getMethod().name());
        SolrQueryResponse rsp = new SolrQueryResponse();
        SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
        core.execute(handler, req, rsp);
        checkForExceptions(rsp);
        // Check if this should stream results
        if (request.getStreamingResponseCallback() != null) {
            try {
                final StreamingResponseCallback callback = request.getStreamingResponseCallback();
                BinaryResponseWriter.Resolver resolver = new BinaryResponseWriter.Resolver(req, rsp.getReturnFields()) {
                    @Override
                    public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOException {
                        // write an empty list...
                        SolrDocumentList docs = new SolrDocumentList();
                        docs.setNumFound(ctx.getDocList().matches());
                        docs.setStart(ctx.getDocList().offset());
                        docs.setMaxScore(ctx.getDocList().maxScore());
                        codec.writeSolrDocumentList(docs);
                        // This will transform
                        writeResultsBody(ctx, codec);
                    }
                };
                try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
                    createJavaBinCodec(callback, resolver).setWritableDocFields(resolver).marshal(rsp.getValues(), out);
                    try (InputStream in = out.toInputStream()) {
                        return (NamedList<Object>) new JavaBinCodec(resolver).unmarshal(in);
                    }
                }
            } catch (Exception ex) {
                throw new RuntimeException(ex);
            }
        }
        // Now write it out
        NamedList<Object> normalized = BinaryResponseWriter.getParsedResponse(req, rsp);
        return normalized;
    } catch (IOException | SolrException iox) {
        throw iox;
    } catch (Exception ex) {
        throw new SolrServerException(ex);
    } finally {
        if (req != null)
            req.close();
        SolrRequestInfo.clearRequestInfo();
    }
}
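The overridden writeResults above first emits an empty SolrDocumentList carrying only the numFound/start/maxScore header, then streams the ResultContext body document by document; on the client side this surfaces through a StreamingResponseCallback. A minimal usage sketch (the solr home path and core name are placeholders):

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.StreamingResponseCallback;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.common.SolrDocument;

public class StreamingRequestExample {

    public static void main(String[] args) throws SolrServerException, IOException {
        // placeholder solr home and core name; adjust to your setup
        EmbeddedSolrServer server = new EmbeddedSolrServer(Paths.get("/path/to/solr/home"), "collection1");
        try {
            server.queryAndStreamResponse("collection1", new SolrQuery("*:*"), new StreamingResponseCallback() {
                @Override
                public void streamDocListInfo(long numFound, long start, Float maxScore) {
                    // header values come from the empty SolrDocumentList written by the Resolver
                    System.out.println("numFound=" + numFound);
                }

                @Override
                public void streamSolrDocument(SolrDocument doc) {
                    // each document decoded from the streamed ResultContext body
                    System.out.println(doc.getFieldValue("id"));
                }
            });
        } finally {
            server.close();
        }
    }
}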