Use of org.apache.solr.search.SyntaxError in project lucene-solr by apache.
From the class TestIntervalFaceting, method assertBadInterval.
private void assertBadInterval(String fieldName, String intervalStr, String errorMsg) {
  SchemaField f = h.getCore().getLatestSchema().getField(fieldName);
  try {
    new FacetInterval(f, intervalStr, new ModifiableSolrParams());
    fail("Expecting SyntaxError for interval String: " + intervalStr);
  } catch (SyntaxError e) {
    assertTrue("Unexpected error message for interval String: " + intervalStr + ": " + e.getMessage(),
        e.getMessage().contains(errorMsg));
  }
}
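For illustration, a hypothetical call to this helper could look like the following; the field name and the expected message fragment are made up and would have to match the test schema and FacetInterval's actual error strings:

assertBadInterval("test_s_dv", "(0,1", "Invalid end character");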
Use of org.apache.solr.search.SyntaxError in project lucene-solr by apache.
From the class RealTimeGetComponent, method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrQueryRequest req = rb.req;
  SolrQueryResponse rsp = rb.rsp;
  SolrParams params = req.getParams();

  CloudDescriptor cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
  if (cloudDesc != null) {
    Replica.Type replicaType = cloudDesc.getReplicaType();
    if (replicaType != null) {
      if (replicaType == Replica.Type.PULL) {
        throw new SolrException(ErrorCode.BAD_REQUEST,
            String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support this type of request",
                cloudDesc.getCoreNodeName(), Replica.Type.PULL));
      }
      // non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
    }
  }
  if (!params.getBool(COMPONENT_NAME, true)) {
    return;
  }

  // This seems rather kludgey; maybe there is a better way to indicate
  // that the replica can support handling version ranges
  String val = params.get("checkCanHandleVersionRanges");
  if (val != null) {
    rb.rsp.add("canHandleVersionRanges", true);
    return;
  }

  val = params.get("getFingerprint");
  if (val != null) {
    processGetFingeprint(rb);
    return;
  }

  val = params.get("getVersions");
  if (val != null) {
    processGetVersions(rb);
    return;
  }
val = params.get("getUpdates");
if (val != null) {
// solrcloud_debug
if (log.isDebugEnabled()) {
try {
RefCounted<SolrIndexSearcher> searchHolder = req.getCore().getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
try {
log.debug(req.getCore().getCoreContainer().getZkController().getNodeName() + " min count to sync to (from most recent searcher view) " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
} finally {
searchHolder.decref();
}
} catch (Exception e) {
log.debug("Error in solrcloud_debug block", e);
}
}
processGetUpdates(rb);
return;
}
val = params.get("getInputDocument");
if (val != null) {
processGetInputDocument(rb);
return;
}
final IdsRequsted reqIds = IdsRequsted.parseParams(req);
if (reqIds.allIds.isEmpty()) {
return;
}
  // parse any existing filters
  try {
    String[] fqs = req.getParams().getParams(CommonParams.FQ);
    if (fqs != null && fqs.length != 0) {
      List<Query> filters = rb.getFilters();
      // if filters already exist, make a copy instead of modifying the original
      filters = filters == null ? new ArrayList<Query>(fqs.length) : new ArrayList<>(filters);
      for (String fq : fqs) {
        if (fq != null && fq.trim().length() != 0) {
          QParser fqp = QParser.getParser(fq, req);
          filters.add(fqp.getQuery());
        }
      }
      if (!filters.isEmpty()) {
        rb.setFilters(filters);
      }
    }
  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
  }

  final SolrCore core = req.getCore();
  SchemaField idField = core.getLatestSchema().getUniqueKeyField();
  FieldType fieldType = idField.getType();

  SolrDocumentList docList = new SolrDocumentList();
  UpdateLog ulog = core.getUpdateHandler().getUpdateLog();

  SearcherInfo searcherInfo = new SearcherInfo(core);
  // this is initialized & set on the context *after* any searcher (re-)opening
  ResultContext resultContext = null;
  final DocTransformer transformer = rsp.getReturnFields().getTransformer();

  // true in any situation where we have to use a realtime searcher rather than returning docs
  // directly from the UpdateLog
  final boolean mustUseRealtimeSearcher =
      // if we have filters, we need to check those against the indexed form of the doc
      (rb.getFilters() != null) || ((null != transformer) && transformer.needsSolrIndexSearcher());
  try {
    BytesRefBuilder idBytes = new BytesRefBuilder();
    for (String idStr : reqIds.allIds) {
      fieldType.readableToIndexed(idStr, idBytes);
      if (ulog != null) {
        Object o = ulog.lookup(idBytes.get());
        if (o != null) {
          // should currently be a List<Oper,Ver,Doc/Id>
          List entry = (List) o;
          assert entry.size() >= 3;
          int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
          switch (oper) {
            case UpdateLog.UPDATE_INPLACE: // fall through to ADD
            case UpdateLog.ADD:
              if (mustUseRealtimeSearcher) {
                // close handles to current searchers & result context
                searcherInfo.clear();
                resultContext = null;
                // force open a new realtime searcher
                ulog.openRealtimeSearcher();
                // pretend we never found this record and fall through to use the searcher
                o = null;
                break;
              }
              SolrDocument doc;
              if (oper == UpdateLog.ADD) {
                doc = toSolrDoc((SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
              } else if (oper == UpdateLog.UPDATE_INPLACE) {
                assert entry.size() == 5;
                // For the in-place update case, we have only obtained the partial document so far.
                // We need to resolve it to a full document to be returned to the user.
                doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(),
                    (SolrInputDocument) entry.get(entry.size() - 1), entry, null);
                if (doc == null) {
                  // document was deleted while the resolve was going on
                  break;
                }
              } else {
                throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
              }
              if (transformer != null) {
                transformer.transform(doc, -1, 0); // unknown docID
              }
              docList.add(doc);
              break;
            case UpdateLog.DELETE:
              break;
            default:
              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
          }
          if (o != null) continue;
        }
      }
      // didn't find it in the update log, so it should be in the newest searcher opened
      searcherInfo.init();
      // don't bother with the ResultContext yet; we won't need it if the doc doesn't match filters

      int docid = -1;
      long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
      if (segAndId >= 0) {
        // low 32 bits hold the doc id within its segment; high 32 bits hold the segment ordinal
        int segid = (int) segAndId;
        LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
        docid = segid + ctx.docBase;

        if (rb.getFilters() != null) {
          for (Query raw : rb.getFilters()) {
            Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
            Scorer scorer = searcherInfo.getSearcher().createWeight(q, false, 1f).scorer(ctx);
            if (scorer == null || segid != scorer.iterator().advance(segid)) {
              // filter doesn't match
              docid = -1;
              break;
            }
          }
        }
      }

      if (docid < 0) continue;

      Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
      SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
      SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
      docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
      if (null != transformer) {
        if (null == resultContext) {
          // either the first pass, or we've re-opened the searcher - either way, set the context now
          resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
          transformer.setContext(resultContext);
        }
        transformer.transform(doc, docid, 0);
      }
      docList.add(doc);
    }
  } finally {
    searcherInfo.clear();
  }

  addDocListToResponse(rb, docList);
}
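The SyntaxError usage above is the fq-parsing block near the top of the method. A minimal standalone sketch of that pattern, assuming the same imports as the component (the helper name parseFilters is made up, not part of RealTimeGetComponent):

private static List<Query> parseFilters(SolrQueryRequest req) {
  List<Query> filters = new ArrayList<>();
  String[] fqs = req.getParams().getParams(CommonParams.FQ);
  if (fqs == null) return filters;
  for (String fq : fqs) {
    if (fq == null || fq.trim().isEmpty()) continue;
    try {
      // QParser.getParser may throw SyntaxError for a malformed query string
      filters.add(QParser.getParser(fq, req).getQuery());
    } catch (SyntaxError e) {
      // surface the parse failure as a 400 (client error) rather than a 500, as process() does
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }
  }
  return filters;
}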
Use of org.apache.solr.search.SyntaxError in project lucene-solr by apache.
From the class SolrPluginUtils, method doSimpleQuery.
/**
 * Executes a basic query
 */
public static DocList doSimpleQuery(String sreq, SolrQueryRequest req, int start, int limit) throws IOException {
  List<String> commands = StrUtils.splitSmart(sreq, ';');
  String qs = commands.size() >= 1 ? commands.get(0) : "";
  try {
    Query query = QParser.getParser(qs, req).getQuery();

    // If the first non-query, non-filter command is a simple sort on an indexed field, then
    // we can use the Lucene sort ability.
    Sort sort = null;
    if (commands.size() >= 2) {
      sort = SortSpecParsing.parseSortSpec(commands.get(1), req).getSort();
    }

    DocList results = req.getSearcher().getDocList(query, (DocSet) null, sort, start, limit);
    return results;
  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing query: " + qs);
  }
}
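A hypothetical invocation (the field names are made up): the semicolon-separated request string carries the query first and an optional sort spec second, so

DocList hits = SolrPluginUtils.doSimpleQuery("name:ipod; price asc", req, 0, 10);

returns the first ten matches of name:ipod ordered by price ascending, or throws a BAD_REQUEST SolrException if the query string fails to parse.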
Use of org.apache.solr.search.SyntaxError in project lucene-solr by apache.
From the class FacetQueryMerger, method prepare.
@Override
public void prepare(ResponseBuilder rb) throws IOException {
  Map<String, Object> json = rb.req.getJSON();
  Map<String, Object> jsonFacet = null;
  if (json == null) {
    int version = rb.req.getParams().getInt("facet.version", 1);
    if (version <= 1) return;
    boolean facetsEnabled = rb.req.getParams().getBool(FacetParams.FACET, false);
    if (!facetsEnabled) return;
    jsonFacet = new LegacyFacet(rb.req.getParams()).getLegacy();
  } else {
    jsonFacet = (Map<String, Object>) json.get("facet");
  }
  if (jsonFacet == null) return;

  SolrParams params = rb.req.getParams();

  boolean isShard = params.getBool(ShardParams.IS_SHARD, false);
  Map<String, Object> facetInfo = null;
  if (isShard) {
    String jfacet = params.get(FACET_INFO);
    if (jfacet == null) {
      // if this is a shard request, but there is no _facet_ info, then don't do anything.
      return;
    }
    facetInfo = (Map<String, Object>) ObjectBuilder.fromJSON(jfacet);
  }

  // At this point, we know we need to do something. Create and save the state.
  rb.setNeedDocSet(true);

  // Parse the facet in the prepare phase?
  FacetParser parser = new FacetTopParser(rb.req);
  FacetRequest facetRequest = null;
  try {
    facetRequest = parser.parse(jsonFacet);
  } catch (SyntaxError syntaxError) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
  }

  FacetComponentState fcState = new FacetComponentState();
  fcState.rb = rb;
  fcState.isShard = isShard;
  fcState.facetInfo = facetInfo;
  fcState.facetCommands = jsonFacet;
  fcState.facetRequest = facetRequest;

  rb.req.getContext().put(FacetComponentState.class, fcState);
}
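For reference, the Map passed to parser.parse mirrors the body of a JSON Facet API request. A sketch of constructing one directly, using the same noggit ObjectBuilder seen above (the facet name categories and field cat are made up, and req stands for some SolrQueryRequest):

Map<String, Object> jsonFacet = (Map<String, Object>) ObjectBuilder.fromJSON(
    "{categories: {type: terms, field: cat, limit: 10}}"); // noggit tolerates unquoted keys
FacetRequest facetRequest = new FacetTopParser(req).parse(jsonFacet); // throws SyntaxError on a malformed facet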
Use of org.apache.solr.search.SyntaxError in project lucene-solr by apache.
From the class FacetProcessor, method handleFilterExclusions.
private void handleFilterExclusions() throws IOException {
  List<String> excludeTags = freq.domain.excludeTags;
  if (excludeTags == null || excludeTags.size() == 0) {
    return;
  }

  // TODO: somehow remove responsebuilder dependency
  ResponseBuilder rb = SolrRequestInfo.getRequestInfo().getResponseBuilder();
  Map tagMap = (Map) rb.req.getContext().get("tags");
  if (tagMap == null) {
    // no filters were tagged
    return;
  }

  IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
  for (String excludeTag : excludeTags) {
    Object olst = tagMap.get(excludeTag);
    // tagMap has entries of List<String,List<QParser>>, but subject to change in the future
    if (!(olst instanceof Collection)) continue;
    for (Object o : (Collection<?>) olst) {
      if (!(o instanceof QParser)) continue;
      QParser qp = (QParser) o;
      try {
        excludeSet.put(qp.getQuery(), Boolean.TRUE);
      } catch (SyntaxError syntaxError) {
        // This should not happen since we should only be retrieving a previously parsed query
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
      }
    }
  }
  if (excludeSet.size() == 0) return;

  List<Query> qlist = new ArrayList<>();
  // add the base query
  if (!excludeSet.containsKey(rb.getQuery())) {
    qlist.add(rb.getQuery());
  }

  // add the filters
  if (rb.getFilters() != null) {
    for (Query q : rb.getFilters()) {
      if (!excludeSet.containsKey(q)) {
        qlist.add(q);
      }
    }
  }

  // TODO: we lose parent exclusions...
  for (FacetContext curr = fcontext; curr != null; curr = curr.parent) {
    if (curr.filter != null) {
      qlist.add(curr.filter);
    }
  }

  // recompute the base domain
  fcontext.base = fcontext.searcher.getDocSet(qlist);
}
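To see where the entries in tagMap come from, a request can tag a filter with local params and then exclude it from a facet's domain. A hypothetical example (the field color and tag COLOR are made up):

ModifiableSolrParams params = new ModifiableSolrParams();
params.add("q", "*:*");
params.add("fq", "{!tag=COLOR}color:blue"); // the tagging QParser is recorded in the request context under "tags"
params.add("json.facet", "{colors: {type: terms, field: color, domain: {excludeTags: COLOR}}}");

With excludeTags in place, the method above rebuilds the facet's base domain from every query except the tagged filter.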