Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class SolrPluginUtilsTest, method testDisjunctionMaxQueryParser.
@Test
public void testDisjunctionMaxQueryParser() throws Exception {
  Query out;
  String t;
  SolrQueryRequest req = req("df", "text");
  QParser qparser = QParser.getParser("hi", "dismax", req);
  DisjunctionMaxQueryParser qp = new SolrPluginUtils.DisjunctionMaxQueryParser(qparser, req.getParams().get("df"));
  qp.addAlias("hoss", 0.01f, SolrPluginUtils.parseFieldBoosts("title^2.0 title_stemmed name^1.2 subject^0.5"));
  qp.addAlias("test", 0.01f, SolrPluginUtils.parseFieldBoosts("text^2.0"));
  qp.addAlias("unused", 1.0f, SolrPluginUtils.parseFieldBoosts("subject^0.5 sind^1.5"));
  /* first some sanity tests that don't use aliasing at all */
  t = "XXXXXXXX";
  out = qp.parse(t);
  assertNotNull(t + " sanity test gave back null", out);
  assertTrue(t + " sanity test isn't TermQuery: " + out.getClass(), out instanceof TermQuery);
  assertEquals(t + " sanity test is wrong field", qp.getDefaultField(), ((TermQuery) out).getTerm().field());
  t = "subject:XXXXXXXX";
  out = qp.parse(t);
  assertNotNull(t + " sanity test gave back null", out);
  assertTrue(t + " sanity test isn't TermQuery: " + out.getClass(), out instanceof TermQuery);
  assertEquals(t + " sanity test is wrong field", "subject", ((TermQuery) out).getTerm().field());
  /* field has untokenized type, so this should be a term anyway */
t = "sind:\"simple phrase\"";
out = qp.parse(t);
assertNotNull(t + " sanity test gave back null", out);
assertTrue(t + " sanity test isn't TermQuery: " + out.getClass(), out instanceof TermQuery);
assertEquals(t + " sanity test is wrong field", "sind", ((TermQuery) out).getTerm().field());
t = "subject:\"simple phrase\"";
out = qp.parse(t);
assertNotNull(t + " sanity test gave back null", out);
assertTrue(t + " sanity test isn't PhraseQuery: " + out.getClass(), out instanceof PhraseQuery);
assertEquals(t + " sanity test is wrong field", "subject", ((PhraseQuery) out).getTerms()[0].field());
/* now some tests that use aliasing */
/* basic usage of single "term" */
t = "hoss:XXXXXXXX";
out = qp.parse(t);
assertNotNull(t + " was null", out);
assertTrue(t + " wasn't a DMQ:" + out.getClass(), out instanceof DisjunctionMaxQuery);
assertEquals(t + " wrong number of clauses", 4, countItems(((DisjunctionMaxQuery) out).iterator()));
/* odd case, but should still work, DMQ of one clause */
t = "test:YYYYY";
out = qp.parse(t);
assertNotNull(t + " was null", out);
assertTrue(t + " wasn't a DMQ:" + out.getClass(), out instanceof DisjunctionMaxQuery);
assertEquals(t + " wrong number of clauses", 1, countItems(((DisjunctionMaxQuery) out).iterator()));
/* basic usage of multiple "terms" */
t = "hoss:XXXXXXXX test:YYYYY";
out = qp.parse(t);
assertNotNull(t + " was null", out);
assertTrue(t + " wasn't a boolean:" + out.getClass(), out instanceof BooleanQuery);
{
BooleanQuery bq = (BooleanQuery) out;
List<BooleanClause> clauses = new ArrayList<>(bq.clauses());
assertEquals(t + " wrong number of clauses", 2, clauses.size());
Query sub = clauses.get(0).getQuery();
assertTrue(t + " first wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
assertEquals(t + " first had wrong number of clauses", 4, countItems(((DisjunctionMaxQuery) sub).iterator()));
sub = clauses.get(1).getQuery();
assertTrue(t + " second wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
assertEquals(t + " second had wrong number of clauses", 1, countItems(((DisjunctionMaxQuery) sub).iterator()));
}
/* a phrase, and a term that is a stop word for some fields */
t = "hoss:\"XXXXXX YYYYY\" hoss:the";
out = qp.parse(t);
assertNotNull(t + " was null", out);
assertTrue(t + " wasn't a boolean:" + out.getClass(), out instanceof BooleanQuery);
{
BooleanQuery bq = (BooleanQuery) out;
List<BooleanClause> clauses = new ArrayList<>(bq.clauses());
assertEquals(t + " wrong number of clauses", 2, clauses.size());
Query sub = clauses.get(0).getQuery();
assertTrue(t + " first wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
assertEquals(t + " first had wrong number of clauses", 4, countItems(((DisjunctionMaxQuery) sub).iterator()));
sub = clauses.get(1).getQuery();
assertTrue(t + " second wasn't a DMQ:" + sub.getClass(), sub instanceof DisjunctionMaxQuery);
assertEquals(t + " second had wrong number of clauses (stop words)", 2, countItems(((DisjunctionMaxQuery) sub).iterator()));
}
}
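The assertions above rely on a countItems helper that is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming it simply exhausts the iterator and counts its elements (an assumption, not necessarily the project's exact code):

private static int countItems(java.util.Iterator<?> it) {
  // Assumed helper: walk the iterator and count how many elements it yields.
  int count = 0;
  while (it.hasNext()) {
    it.next();
    count++;
  }
  return count;
}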
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class FacetQueryParser, method parse.
@Override
public FacetQuery parse(Object arg) throws SyntaxError {
  parseCommonParams(arg);
  String qstring = null;
  if (arg instanceof String) {
    // just the field name...
    qstring = (String) arg;
  } else if (arg instanceof Map) {
    Map<String, Object> m = (Map<String, Object>) arg;
    qstring = getString(m, "q", null);
    if (qstring == null) {
      qstring = getString(m, "query", null);
    }
    // OK to parse subs before we have parsed our own query?
    // as long as subs don't need to know about it.
    parseSubs(m.get("facet"));
  }
  if (qstring != null) {
    QParser parser = QParser.getParser(qstring, getSolrRequest());
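    // setIsFilter(true) marks the parsed query as a filter: it is only used for
    // matching, so no scoring information needs to be computed.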
    parser.setIsFilter(true);
    facet.q = parser.getQuery();
  }
  return facet;
}
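For context, a hedged sketch (not part of the source) of the two facet-spec shapes this parse method handles in a JSON Facet request: a plain query string (the shorthand { query : "..." } form) or a map carrying q/query plus optional sub-facets. The facet and field names (popularity, price) are purely illustrative.

// Hypothetical JSON Facet request body; Solr's lenient JSON parser accepts the
// unquoted keys and single quotes used here.
String jsonFacets =
      "{"
    + "  high_pop  : { query : 'popularity:[8 TO 10]' },"       // parse(...) receives the String
    + "  expensive : { type : query, q : 'price:[100 TO *]',"   // parse(...) receives the Map
    + "                facet : { avg_price : 'avg(price)' } }"   // nested sub-facets -> parseSubs(...)
    + "}";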
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class FacetProcessor, method handleBlockJoin.
// returns "true" if filters were applied to fcontext.base already
private boolean handleBlockJoin() throws IOException {
  boolean appliedFilters = false;
  if (!(freq.domain.toChildren || freq.domain.toParent))
    return appliedFilters;
  // TODO: avoid query parsing per-bucket somehow...
  String parentStr = freq.domain.parents;
  Query parentQuery;
  try {
    QParser parser = QParser.getParser(parentStr, fcontext.req);
    parser.setIsFilter(true);
    parentQuery = parser.getQuery();
  } catch (SyntaxError err) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing block join parent specification: " + parentStr);
  }
  BitDocSet parents = fcontext.searcher.getDocSetBits(parentQuery);
  DocSet input = fcontext.base;
  DocSet result;
  if (freq.domain.toChildren) {
    // If there are filters on this facet, then use them as acceptDocs when executing toChildren.
    // We need to remember to not redundantly re-apply these filters after.
    DocSet acceptDocs = this.filter;
    if (acceptDocs == null) {
      acceptDocs = fcontext.searcher.getLiveDocs();
    } else {
      appliedFilters = true;
    }
    result = BlockJoin.toChildren(input, parents, acceptDocs, fcontext.qcontext);
  } else {
    result = BlockJoin.toParents(input, parents, fcontext.qcontext);
  }
  fcontext.base = result;
  return appliedFilters;
}
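For context, a hedged sketch (not part of the source) of the JSON Facet domain options that exercise this code path: blockChildren corresponds to freq.domain.toChildren and blockParent to freq.domain.toParent, and the value is the parent-filter string held in freq.domain.parents. The facet and field names below are purely illustrative.

// Hypothetical child-level terms facet whose domain is mapped from parent documents.
String childFacet =
      "{ colors : {"
    + "    type   : terms, field : sku_color,"
    + "    domain : { blockChildren : 'doc_type:product' }"   // parsed above as parentQuery
    + "  } }";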
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class FacetProcessor, method evalFilters.
private void evalFilters() throws IOException {
  if (freq.domain.filters == null || freq.domain.filters.isEmpty())
    return;
  List<Query> qlist = new ArrayList<>(freq.domain.filters.size());
  // TODO: prevent parsing filters each time!
  for (Object rawFilter : freq.domain.filters) {
    if (rawFilter instanceof String) {
      QParser parser = null;
      try {
        parser = QParser.getParser((String) rawFilter, fcontext.req);
        parser.setIsFilter(true);
        Query symbolicFilter = parser.getQuery();
        qlist.add(symbolicFilter);
      } catch (SyntaxError syntaxError) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
      }
    } else if (rawFilter instanceof Map) {
      Map<String, Object> m = (Map<String, Object>) rawFilter;
      String type;
      Object args;
      if (m.size() == 1) {
        Map.Entry<String, Object> entry = m.entrySet().iterator().next();
        type = entry.getKey();
        args = entry.getValue();
      } else {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't convert map to query:" + rawFilter);
      }
      if (!"param".equals(type)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown type. Can't convert map to query:" + rawFilter);
      }
      String tag;
      if (!(args instanceof String)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't retrieve non-string param:" + args);
      }
      tag = (String) args;
      String[] qstrings = fcontext.req.getParams().getParams(tag);
      if (qstrings != null) {
        for (String qstring : qstrings) {
          QParser parser = null;
          try {
            parser = QParser.getParser(qstring, fcontext.req);
            parser.setIsFilter(true);
            Query symbolicFilter = parser.getQuery();
            qlist.add(symbolicFilter);
          } catch (SyntaxError syntaxError) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
          }
        }
      }
    } else {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad query (expected a string):" + rawFilter);
    }
  }
  this.filter = fcontext.searcher.getDocSet(qlist);
}
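Similarly, a hedged sketch (not part of the source) of the domain filters this method consumes: each entry is either a query string or a { param : name } reference that is resolved against the request parameters, e.g. repeated catFilters parameters on the request. All names below are illustrative.

// Hypothetical facet with domain filters; a request carrying
// &catFilters=cat:books&catFilters=inStock:true would be picked up via { param : ... }.
String filteredFacet =
      "{ top_authors : {"
    + "    type   : terms, field : author,"
    + "    domain : { filter : [ 'popularity:[5 TO *]',"        // plain query string
    + "                          { param : 'catFilters' } ] }"  // request-parameter lookup
    + "  } }";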
Use of org.apache.solr.search.QParser in project lucene-solr by apache.
The class DirectUpdateHandler2, method getQuery.
private Query getQuery(DeleteUpdateCommand cmd) {
  Query q;
  try {
    // move this higher in the stack?
    QParser parser = QParser.getParser(cmd.getQuery(), cmd.req);
    q = parser.getQuery();
    q = QueryUtils.makeQueryable(q);
    // Make sure not to delete newer versions
    if (ulog != null && cmd.getVersion() != 0 && cmd.getVersion() != -Long.MAX_VALUE) {
      BooleanQuery.Builder bq = new BooleanQuery.Builder();
      bq.add(q, Occur.MUST);
      SchemaField sf = ulog.getVersionInfo().getVersionField();
      ValueSource vs = sf.getType().getValueSource(sf, null);
      ValueSourceRangeFilter filt = new ValueSourceRangeFilter(vs, Long.toString(Math.abs(cmd.getVersion())), null, true, true);
      FunctionRangeQuery range = new FunctionRangeQuery(filt);
      // formulated in the "MUST_NOT" sense so we can delete docs w/o a version (some tests depend on this...)
      bq.add(range, Occur.MUST_NOT);
      q = bq.build();
    }
    return q;
  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }
}
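For context, a hedged sketch (not part of the source) of how such a delete-by-query typically originates from a SolrJ client; the base URL, collection name, and query string are illustrative. On the server that query string becomes cmd.getQuery(), is parsed by QParser as shown above, and is guarded so documents with a newer _version_ survive the delete.

import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class DeleteByQueryExample {
  public static void main(String[] args) throws SolrServerException, IOException {
    // Hypothetical client-side delete-by-query; the query string is what
    // DirectUpdateHandler2.getQuery() parses on the server.
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      client.deleteByQuery("techproducts", "category:obsolete");
      client.commit("techproducts");
    }
  }
}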