Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class HighlightComponent, method prepare.
@Override
public void prepare(ResponseBuilder rb) throws IOException {
  SolrParams params = rb.req.getParams();
  rb.doHighlights = solrConfigHighlighter.isHighlightingEnabled(params);
  if (rb.doHighlights) {
    rb.setNeedDocList(true);
    String hlq = params.get(HighlightParams.Q);
    String hlparser = Objects.firstNonNull(params.get(HighlightParams.QPARSER), params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE));
    if (hlq != null) {
      try {
        QParser parser = QParser.getParser(hlq, hlparser, rb.req);
        rb.setHighlightQuery(parser.getHighlightQuery());
      } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
      }
    }
  }
}
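For orientation, here is a minimal standalone sketch of the same pattern outside the component: parse a user-supplied highlight query with a caller-chosen parser, then ask the parser for its highlight-friendly query form. The helper name parseHighlightQuery is hypothetical, not Solr API.

import org.apache.lucene.search.Query;
import org.apache.solr.common.SolrException;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SyntaxError;

// Hypothetical helper illustrating the QParser highlight-query pattern above.
static Query parseHighlightQuery(String hlq, String parserName, SolrQueryRequest req) {
  try {
    // getParser resolves the named QParserPlugin and binds it to the request
    QParser parser = QParser.getParser(hlq, parserName, req);
    // getHighlightQuery() may return a variant better suited for highlighting
    return parser.getHighlightQuery();
  } catch (SyntaxError e) {
    // surface user query syntax problems as HTTP 400
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }
}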
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class FacetQueryParser, method parse.
@Override
public FacetQuery parse(Object arg) throws SyntaxError {
  parseCommonParams(arg);
  String qstring = null;
  if (arg instanceof String) {
    // just the field name...
    qstring = (String) arg;
  } else if (arg instanceof Map) {
    Map<String, Object> m = (Map<String, Object>) arg;
    qstring = getString(m, "q", null);
    if (qstring == null) {
      qstring = getString(m, "query", null);
    }
    // OK to parse subs before we have parsed our own query?
    // as long as subs don't need to know about it.
    parseSubs(m.get("facet"));
  }
  if (qstring != null) {
    QParser parser = QParser.getParser(qstring, getSolrRequest());
    parser.setIsFilter(true);
    facet.q = parser.getQuery();
  }
  return facet;
}
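The interesting QParser detail here is setIsFilter(true): it tells the parser that the resulting query is used only for matching, never for scoring, so the parser is free to choose a cheaper query form. A minimal sketch of that pattern, with a hypothetical helper name:

import org.apache.lucene.search.Query;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SyntaxError;

// Hypothetical helper: parse a query string as a non-scoring filter.
static Query parseFilter(String qstring, SolrQueryRequest req) throws SyntaxError {
  QParser parser = QParser.getParser(qstring, req);
  // scores will be ignored, so the parser may pick a constant-scoring form
  parser.setIsFilter(true);
  return parser.getQuery();
}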
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class SolrPluginUtilsTest, method testDisjunctionMaxQueryParser.
@Test
public void testDisjunctionMaxQueryParser() throws Exception {
  Query out;
  String t;
  SolrQueryRequest req = req("df", "text");
  QParser qparser = QParser.getParser("hi", "dismax", req);
  DisjunctionMaxQueryParser qp = new SolrPluginUtils.DisjunctionMaxQueryParser(qparser, req.getParams().get("df"));
  qp.addAlias("hoss", 0.01f, SolrPluginUtils.parseFieldBoosts("title^2.0 title_stemmed name^1.2 subject^0.5"));
  qp.addAlias("test", 0.01f, SolrPluginUtils.parseFieldBoosts("text^2.0"));
  qp.addAlias("unused", 1.0f, SolrPluginUtils.parseFieldBoosts("subject^0.5 sind^1.5"));
  /* first some sanity tests that don't use aliasing at all */
  t = "XXXXXXXX";
  out = qp.parse(t);
  assertNotNull(t + " sanity test gave back null", out);
  assertTrue(t + " sanity test isn't TermQuery: " + out.getClass(), out instanceof TermQuery);
  assertEquals(t + " sanity test is wrong field", qp.getDefaultField(), ((TermQuery) out).getTerm().field());
  t = "subject:XXXXXXXX";
  out = qp.parse(t);
  assertNotNull(t + " sanity test gave back null", out);
  assertTrue(t + " sanity test isn't TermQuery: " + out.getClass(), out instanceof TermQuery);
  assertEquals(t + " sanity test is wrong field", "subject", ((TermQuery) out).getTerm().field());
  /* field has untokenized type, so this should be a term anyway */
  t = "sind:\"simple phrase\"";
  out = qp.parse(t);
  assertNotNull(t + " sanity test gave back null", out);
  assertTrue(t + " sanity test isn't TermQuery: " + out.getClass(), out instanceof TermQuery);
  assertEquals(t + " sanity test is wrong field", "sind", ((TermQuery) out).getTerm().field());
  t = "subject:\"simple phrase\"";
  out = qp.parse(t);
  assertNotNull(t + " sanity test gave back null", out);
  assertTrue(t + " sanity test isn't PhraseQuery: " + out.getClass(), out instanceof PhraseQuery);
  assertEquals(t + " sanity test is wrong field", "subject", ((PhraseQuery) out).getTerms()[0].field());
  /* now some tests that use aliasing */
  /* basic usage of single "term" */
  t = "hoss:XXXXXXXX";
  out = qp.parse(t);
  assertNotNull(t + " was null", out);
  assertTrue(t + " wasn't a DMQ:" + out.getClass(), out instanceof DisjunctionMaxQuery);
  assertEquals(t + " wrong number of clauses", 4, countItems(((DisjunctionMaxQuery) out).iterator()));
  /* odd case, but should still work, DMQ of one clause */
  t = "test:YYYYY";
  out = qp.parse(t);
  assertNotNull(t + " was null", out);
  assertTrue(t + " wasn't a DMQ:" + out.getClass(), out instanceof DisjunctionMaxQuery);
  assertEquals(t + " wrong number of clauses", 1, countItems(((DisjunctionMaxQuery) out).iterator()));
  /* basic usage of multiple "terms" */
  t = "hoss:XXXXXXXX test:YYYYY";
  out = qp.parse(t);
  assertNotNull(t + " was null", out);
  assertTrue(t + " wasn't a boolean:" + out.getClass(), out instanceof BooleanQuery);
  {
    BooleanQuery bq = (BooleanQuery) out;
    List<BooleanClause> clauses = new ArrayList<>(bq.clauses());
    assertEquals(t + " wrong number of clauses", 2, clauses.size());
    Query sub = clauses.get(0).getQuery();
    assertTrue(t + " first wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
    assertEquals(t + " first had wrong number of clauses", 4, countItems(((DisjunctionMaxQuery) sub).iterator()));
    sub = clauses.get(1).getQuery();
    assertTrue(t + " second wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
    assertEquals(t + " second had wrong number of clauses", 1, countItems(((DisjunctionMaxQuery) sub).iterator()));
  }
  /* a phrase, and a term that is a stop word for some fields */
  t = "hoss:\"XXXXXX YYYYY\" hoss:the";
  out = qp.parse(t);
  assertNotNull(t + " was null", out);
  assertTrue(t + " wasn't a boolean:" + out.getClass(), out instanceof BooleanQuery);
  {
    BooleanQuery bq = (BooleanQuery) out;
    List<BooleanClause> clauses = new ArrayList<>(bq.clauses());
    assertEquals(t + " wrong number of clauses", 2, clauses.size());
    Query sub = clauses.get(0).getQuery();
    assertTrue(t + " first wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
    assertEquals(t + " first had wrong number of clauses", 4, countItems(((DisjunctionMaxQuery) sub).iterator()));
    sub = clauses.get(1).getQuery();
    assertTrue(t + " second wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
    assertEquals(t + " second had wrong number of clauses (stop words)", 2, countItems(((DisjunctionMaxQuery) sub).iterator()));
  }
}
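The countItems helper used throughout the test is not shown in this excerpt; presumably it just drains an iterator and returns the element count, along these lines:

import java.util.Iterator;

// Presumed shape of the test's countItems helper: count an iterator's elements.
static int countItems(Iterator<?> it) {
  int count = 0;
  while (it.hasNext()) {
    it.next();
    count++;
  }
  return count;
}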
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class RealTimeGetComponent, method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrQueryRequest req = rb.req;
  SolrQueryResponse rsp = rb.rsp;
  SolrParams params = req.getParams();
  CloudDescriptor cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
  if (cloudDesc != null) {
    Replica.Type replicaType = cloudDesc.getReplicaType();
    if (replicaType != null) {
      if (replicaType == Replica.Type.PULL) {
        throw new SolrException(ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support these type of requests", cloudDesc.getCoreNodeName(), Replica.Type.PULL));
      }
      // non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
    }
  }
  if (!params.getBool(COMPONENT_NAME, true)) {
    return;
  }
  // This seems rather kludgey; maybe there is a better way to indicate
  // that a replica can support handling version ranges
  String val = params.get("checkCanHandleVersionRanges");
  if (val != null) {
    rb.rsp.add("canHandleVersionRanges", true);
    return;
  }
  val = params.get("getFingerprint");
  if (val != null) {
    processGetFingeprint(rb);
    return;
  }
  val = params.get("getVersions");
  if (val != null) {
    processGetVersions(rb);
    return;
  }
  val = params.get("getUpdates");
  if (val != null) {
    // solrcloud_debug
    if (log.isDebugEnabled()) {
      try {
        RefCounted<SolrIndexSearcher> searchHolder = req.getCore().getNewestSearcher(false);
        SolrIndexSearcher searcher = searchHolder.get();
        try {
          log.debug(req.getCore().getCoreContainer().getZkController().getNodeName() + " min count to sync to (from most recent searcher view) " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
        } finally {
          searchHolder.decref();
        }
      } catch (Exception e) {
        log.debug("Error in solrcloud_debug block", e);
      }
    }
    processGetUpdates(rb);
    return;
  }
  val = params.get("getInputDocument");
  if (val != null) {
    processGetInputDocument(rb);
    return;
  }
  final IdsRequsted reqIds = IdsRequsted.parseParams(req);
  if (reqIds.allIds.isEmpty()) {
    return;
  }
  // parse any existing filters
  try {
    String[] fqs = req.getParams().getParams(CommonParams.FQ);
    if (fqs != null && fqs.length != 0) {
      List<Query> filters = rb.getFilters();
      // if filters already exist, make a copy instead of modifying the original
      filters = filters == null ? new ArrayList<Query>(fqs.length) : new ArrayList<>(filters);
      for (String fq : fqs) {
        if (fq != null && fq.trim().length() != 0) {
          QParser fqp = QParser.getParser(fq, req);
          filters.add(fqp.getQuery());
        }
      }
      if (!filters.isEmpty()) {
        rb.setFilters(filters);
      }
    }
  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }
  final SolrCore core = req.getCore();
  SchemaField idField = core.getLatestSchema().getUniqueKeyField();
  FieldType fieldType = idField.getType();
  SolrDocumentList docList = new SolrDocumentList();
  UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
  SearcherInfo searcherInfo = new SearcherInfo(core);
  // this is initialized & set on the context *after* any searcher (re-)opening
  ResultContext resultContext = null;
  final DocTransformer transformer = rsp.getReturnFields().getTransformer();
  // true in any situation where we have to use a realtime searcher rather than returning docs
  // directly from the UpdateLog
  final boolean mustUseRealtimeSearcher = // if we have filters, we need to check those against the indexed form of the doc
      (rb.getFilters() != null) || ((null != transformer) && transformer.needsSolrIndexSearcher());
  try {
    BytesRefBuilder idBytes = new BytesRefBuilder();
    for (String idStr : reqIds.allIds) {
      fieldType.readableToIndexed(idStr, idBytes);
      if (ulog != null) {
        Object o = ulog.lookup(idBytes.get());
        if (o != null) {
          // should currently be a List<Oper,Ver,Doc/Id>
          List entry = (List) o;
          assert entry.size() >= 3;
          int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
          switch (oper) {
            case UpdateLog.UPDATE_INPLACE:
              // fall through to ADD
            case UpdateLog.ADD:
              if (mustUseRealtimeSearcher) {
                // close handles to current searchers & result context
                searcherInfo.clear();
                resultContext = null;
                // force open a new realtime searcher
                ulog.openRealtimeSearcher();
                // pretend we never found this record and fall through to use the searcher
                o = null;
                break;
              }
              SolrDocument doc;
              if (oper == UpdateLog.ADD) {
                doc = toSolrDoc((SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
              } else if (oper == UpdateLog.UPDATE_INPLACE) {
                assert entry.size() == 5;
                // For the in-place update case, we have only obtained the partial document so far. We need to
                // resolve it to a full document to be returned to the user.
                doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument) entry.get(entry.size() - 1), entry, null);
                if (doc == null) {
                  // document has been deleted while the resolve was going on
                  break;
                }
              } else {
                throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
              }
              if (transformer != null) {
                // unknown docID
                transformer.transform(doc, -1, 0);
              }
              docList.add(doc);
              break;
            case UpdateLog.DELETE:
              break;
            default:
              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
          }
          if (o != null)
            continue;
        }
      }
      // didn't find it in the update log, so it should be in the newest searcher opened
      searcherInfo.init();
      // don't bother with ResultContext yet, we won't need it if the doc doesn't match filters
      int docid = -1;
      long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
      if (segAndId >= 0) {
        int segid = (int) segAndId;
        LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
        docid = segid + ctx.docBase;
        if (rb.getFilters() != null) {
          for (Query raw : rb.getFilters()) {
            Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
            Scorer scorer = searcherInfo.getSearcher().createWeight(q, false, 1f).scorer(ctx);
            if (scorer == null || segid != scorer.iterator().advance(segid)) {
              // filter doesn't match.
              docid = -1;
              break;
            }
          }
        }
      }
      if (docid < 0)
        continue;
      Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
      SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
      SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
      docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
      if (null != transformer) {
        if (null == resultContext) {
          // either first pass, or we've re-opened the searcher - either way we now set the context
          resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
          transformer.setContext(resultContext);
        }
        transformer.transform(doc, docid, 0);
      }
      docList.add(doc);
    }
  } finally {
    searcherInfo.clear();
  }
  addDocListToResponse(rb, docList);
}
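The QParser-specific step in this method is the fq handling near the top: each raw fq string is parsed into a Lucene Query so that realtime-get results can be checked against the indexed form of the document. A condensed, standalone sketch of just that step (the helper name is hypothetical):

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.search.Query;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SyntaxError;

// Hypothetical helper mirroring the fq-parsing block above.
static List<Query> parseFilterQueries(SolrQueryRequest req) {
  List<Query> filters = new ArrayList<>();
  String[] fqs = req.getParams().getParams(CommonParams.FQ);
  if (fqs == null) {
    return filters;
  }
  try {
    for (String fq : fqs) {
      if (fq != null && fq.trim().length() != 0) {
        // default parser resolution, same as QParser.getParser(fq, req) above
        filters.add(QParser.getParser(fq, req).getQuery());
      }
    }
  } catch (SyntaxError e) {
    // a malformed fq is the client's fault: report HTTP 400
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }
  return filters;
}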
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class FacetProcessor, method handleFilterExclusions.
private void handleFilterExclusions() throws IOException {
  List<String> excludeTags = freq.domain.excludeTags;
  if (excludeTags == null || excludeTags.size() == 0) {
    return;
  }
  // TODO: somehow remove responsebuilder dependency
  ResponseBuilder rb = SolrRequestInfo.getRequestInfo().getResponseBuilder();
  Map tagMap = (Map) rb.req.getContext().get("tags");
  if (tagMap == null) {
    // no filters were tagged
    return;
  }
  IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
  for (String excludeTag : excludeTags) {
    Object olst = tagMap.get(excludeTag);
    // tagMap has entries of List<String,List<QParser>>, but subject to change in the future
    if (!(olst instanceof Collection))
      continue;
    for (Object o : (Collection<?>) olst) {
      if (!(o instanceof QParser))
        continue;
      QParser qp = (QParser) o;
      try {
        excludeSet.put(qp.getQuery(), Boolean.TRUE);
      } catch (SyntaxError syntaxError) {
        // This should not happen since we should only be retrieving a previously parsed query
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
      }
    }
  }
  if (excludeSet.size() == 0)
    return;
  List<Query> qlist = new ArrayList<>();
  // add the base query
  if (!excludeSet.containsKey(rb.getQuery())) {
    qlist.add(rb.getQuery());
  }
  // add the filters
  if (rb.getFilters() != null) {
    for (Query q : rb.getFilters()) {
      if (!excludeSet.containsKey(q)) {
        qlist.add(q);
      }
    }
  }
  // TODO: we lose parent exclusions...
  for (FacetContext curr = fcontext; curr != null; curr = curr.parent) {
    if (curr.filter != null) {
      qlist.add(curr.filter);
    }
  }
  // recompute the base domain
  fcontext.base = fcontext.searcher.getDocSet(qlist);
}
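Note the IdentityHashMap: exclusion matches the exact Query instances produced by the tagged QParsers, not queries that merely compare equal. A small self-contained illustration of the difference (not Solr code):

import java.util.IdentityHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class IdentityExclusionDemo {
  public static void main(String[] args) {
    Query tagged = new TermQuery(new Term("cat", "book"));
    Query lookalike = new TermQuery(new Term("cat", "book"));
    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    excludeSet.put(tagged, Boolean.TRUE);
    System.out.println(excludeSet.containsKey(tagged)); // true: the same instance
    System.out.println(excludeSet.containsKey(lookalike)); // false: equal, but a different instance
  }
}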