Example usage of org.apache.solr.search.SolrIndexSearcher in the Apache lucene-solr project: class AddBlockUpdateTest, method testExceptionThrown().
@Test
public void testExceptionThrown() throws Exception {
  // Index a valid "abcD" block first so there is committed data that must survive.
  final String validBlockXml = block("abcD").asXML();
  log.info(validBlockXml);
  assertBlockU(validBlockXml);

  // Build an <add> request whose first parent carries a "sample_i" value that
  // cannot be parsed as a number, so the entire update request must fail.
  final Document failingUpdate = DocumentHelper.createDocument();
  final Element addElem = failingUpdate.addElement("add");

  final Element badParent = addElem.addElement("doc");
  attachField(badParent, "id", id());
  attachField(badParent, parent, "Y");
  attachField(badParent, "sample_i", "notanumber/ignore_exception");

  final Element childOfBadParent = badParent.addElement("doc");
  attachField(childOfBadParent, "id", id());
  attachField(childOfBadParent, child, "x");

  // A second, otherwise-valid parent inside the same failing request: it must
  // be rejected along with the rest of the request.
  final Element siblingParent = addElem.addElement("doc");
  attachField(siblingParent, "id", id());
  attachField(siblingParent, parent, "W");

  assertFailedBlockU(failingUpdate.asXML());

  // Index another valid block and commit; only the two valid blocks remain.
  assertBlockU(block("efgH").asXML());
  assertBlockU(commit());

  final SolrIndexSearcher searcher = getSearcher();
  assertQ(req("q", "*:*", "indent", "true", "fl", "id,parent_s,child_s"), "//*[@numFound='" + "abcDefgH".length() + "']");
  assertSingleParentOf(searcher, one("abc"), "D");
  assertSingleParentOf(searcher, one("efg"), "H");
  // Nothing from the failed request may be visible in the index.
  assertQ(req(child + ":x"), "//*[@numFound='0']");
  assertQ(req(parent + ":Y"), "//*[@numFound='0']");
  assertQ(req(parent + ":W"), "//*[@numFound='0']");
}
Example usage of org.apache.solr.search.SolrIndexSearcher in the Apache lucene-solr project: class CarrotClusteringEngineTest, method checkEngine().
/**
 * Clusters every document matching {@code query} with the given engine and verifies
 * both the number of matched documents and the number of clusters produced.
 *
 * @param engine the clustering engine under test
 * @param expectedNumDocs expected number of documents matched by {@code query}
 * @param expectedNumClusters expected number of top-level clusters in the result
 * @param query selects the documents to cluster
 * @param clusteringParams extra request parameters forwarded to the engine
 * @return the cluster list produced by the engine, for further inspection by callers
 * @throws IOException if the searcher fails while collecting documents
 */
private List<NamedList<Object>> checkEngine(CarrotClusteringEngine engine, int expectedNumDocs, int expectedNumClusters, Query query, SolrParams clusteringParams) throws IOException {
  // Get all documents to cluster
  RefCounted<SolrIndexSearcher> ref = h.getCore().getSearcher();
  DocList docList;
  try {
    SolrIndexSearcher searcher = ref.get();
    docList = searcher.getDocList(query, (Query) null, new Sort(), 0, numberOfDocs);
    assertEquals("docList size", expectedNumDocs, docList.matches());
    ModifiableSolrParams solrParams = new ModifiableSolrParams();
    solrParams.add(clusteringParams);
    // Perform clustering
    LocalSolrQueryRequest req = new LocalSolrQueryRequest(h.getCore(), solrParams);
    try {
      Map<SolrDocument, Integer> docIds = new HashMap<>(docList.size());
      SolrDocumentList solrDocList = ClusteringComponent.docListToSolrDocumentList(docList, searcher, engine.getFieldsToLoad(req), docIds);
      @SuppressWarnings("unchecked")
      List<NamedList<Object>> results = (List<NamedList<Object>>) engine.cluster(query, solrDocList, docIds, req);
      assertEquals("number of clusters: " + results, expectedNumClusters, results.size());
      checkClusters(results, false);
      return results;
    } finally {
      // BUGFIX: previously the request was closed only on the success path,
      // leaking it when clustering or an assertion above threw.
      req.close();
    }
  } finally {
    ref.decref();
  }
}
Example usage of org.apache.solr.search.SolrIndexSearcher in the Apache lucene-solr project: class ClusteringComponentTest, method testDocListConversion().
// tests ClusteringComponent.docListToSolrDocumentList
@Test
public void testDocListConversion() throws Exception {
  // Index three documents with identical stored fields, then commit.
  assertU("", adoc("id", "3234", "url", "ignoreme", "val_i", "1", "val_dynamic", "quick red fox"));
  assertU("", adoc("id", "3235", "url", "ignoreme", "val_i", "1", "val_dynamic", "quick green fox"));
  assertU("", adoc("id", "3236", "url", "ignoreme", "val_i", "1", "val_dynamic", "quick brown fox"));
  assertU("", commit());

  final RefCounted<SolrIndexSearcher> searcherHolder = h.getCore().getSearcher();
  try {
    final SolrIndexSearcher indexSearcher = searcherHolder.get();

    // Match everything, with a page size (10) larger than the 3 docs indexed.
    final QueryCommand cmd = new QueryCommand();
    cmd.setQuery(new MatchAllDocsQuery());
    cmd.setLen(10);
    QueryResult qr = new QueryResult();
    qr = indexSearcher.search(qr, cmd);
    final DocList docs = qr.getDocList();
    assertEquals("wrong docs size", 3, docs.size());

    // Only these fields are requested; everything else must be dropped.
    final Set<String> fields = new HashSet<>();
    fields.add("val_dynamic");
    fields.add("dynamic_val");
    // copied from id
    fields.add("range_facet_l");

    final SolrDocumentList converted = ClusteringComponent.docListToSolrDocumentList(docs, indexSearcher, fields, null);
    assertEquals("wrong list Size", docs.size(), converted.size());
    for (SolrDocument document : converted) {
      // Fields that were not requested must be absent — even the id.
      assertTrue("unexpected field", !document.containsKey("val_i"));
      assertTrue("unexpected id field", !document.containsKey("id"));
      // Requested fields must be present and non-null.
      assertTrue("original field", document.containsKey("val_dynamic"));
      assertTrue("dyn copy field", document.containsKey("dynamic_val"));
      assertTrue("copy field", document.containsKey("range_facet_l"));
      assertNotNull("original field null", document.get("val_dynamic"));
      assertNotNull("dyn copy field null", document.get("dynamic_val"));
      assertNotNull("copy field null", document.get("range_facet_l"));
    }
  } finally {
    // searcherHolder is assigned before the try block, so it is never null here.
    searcherHolder.decref();
  }
}
Example usage of org.apache.solr.search.SolrIndexSearcher in the Apache lucene-solr project: class SolrCore, method openNewSearcher().
/** Opens a new searcher and returns a RefCounted<SolrIndexSearcher> with its reference incremented.
 *
 * "realtime" means that we need to open quickly for a realtime view of the index, hence don't do any
 * autowarming and add to the _realtimeSearchers queue rather than the _searchers queue (so it won't
 * be used for autowarming by a future normal searcher). A "realtime" searcher will currently never
 * become "registered" (since it currently lacks caching).
 *
 * realtimeSearcher is updated to the latest opened searcher, regardless of the value of "realtime".
 *
 * This method acquires openSearcherLock - do not call with searchLock held!
 *
 * @param updateHandlerReopens true when the update handler is driving a (near-realtime) reopen;
 *        skips the index-directory-change check below
 * @param realtime true to open without caches for a quick realtime view of the index
 * @return a holder at refcount >= 1 for the caller; the caller must decref() it when done
 * @throws SolrException (SERVER_ERROR) if the core is closed or the searcher cannot be opened
 */
public RefCounted<SolrIndexSearcher> openNewSearcher(boolean updateHandlerReopens, boolean realtime) {
if (isClosed()) {
// catch some errors quicker
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
}
SolrIndexSearcher tmp;
RefCounted<SolrIndexSearcher> newestSearcher = null;
openSearcherLock.lock();
try {
String newIndexDir = getNewIndexDir();
String indexDirFile = null;
String newIndexDirFile = null;
// if it's not a normal near-realtime update, check that paths haven't changed.
if (!updateHandlerReopens) {
indexDirFile = getDirectoryFactory().normalize(getIndexDir());
newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
}
// Grab the current realtime searcher (if any) under searcherLock so we can
// try to reopen from its reader instead of opening from scratch.
synchronized (searcherLock) {
newestSearcher = realtimeSearcher;
if (newestSearcher != null) {
// the matching decref is in the finally block
newestSearcher.incref();
}
}
// Reopen path: only valid when we have an existing searcher AND the index
// directory has not changed (or the update handler is doing the reopen).
if (newestSearcher != null && (updateHandlerReopens || indexDirFile.equals(newIndexDirFile))) {
DirectoryReader newReader;
DirectoryReader currentReader = newestSearcher.get().getRawReader();
// SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);
RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(null);
try {
if (writer != null) {
// if in NRT mode, open from the writer
newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true);
} else {
// verbose("start reopen without writer, reader=", currentReader);
newReader = DirectoryReader.openIfChanged(currentReader);
// verbose("reopen result", newReader);
}
} finally {
if (writer != null) {
writer.decref();
}
}
// openIfChanged returns null when the index is unchanged.
if (newReader == null) {
if (realtime) {
// if this is a request for a realtime searcher, just return the same searcher
newestSearcher.incref();
return newestSearcher;
} else if (newestSearcher.get().isCachingEnabled() && newestSearcher.get().getSchema() == getLatestSchema()) {
// absolutely nothing has changed, can use the same searcher
// but log a message about it to minimize confusion
newestSearcher.incref();
log.debug("SolrIndexSearcher has not changed - not re-opening: " + newestSearcher.get().getName());
return newestSearcher;
}
// ELSE: open a new searcher against the old reader...
// (e.g. a non-realtime searcher is wanted but the current one lacks caches)
currentReader.incRef();
newReader = currentReader;
}
// for now, turn off caches if this is for a realtime reader
// (caches take a little while to instantiate)
final boolean useCaches = !realtime;
final String newName = realtime ? "realtime" : "main";
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), newName, newReader, true, useCaches, true, directoryFactory);
} else {
// Fresh-open path: no existing searcher to reopen from, or the index dir changed.
if (newReaderCreator != null) {
// this is set in the constructor if there is a currently open index writer
// so that we pick up any uncommitted changes and so we don't go backwards
// in time on a core reload
DirectoryReader newReader = newReaderCreator.call();
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
} else {
// Open a brand-new reader from the index writer.
RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(this);
DirectoryReader newReader = null;
try {
newReader = indexReaderFactory.newReader(writer.get(), this);
} finally {
writer.decref();
}
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
}
}
List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
// refcount now at 1
RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList);
// Increment reference again for "realtimeSearcher" variable. It should be at 2 after.
// When it's decremented by both the caller of this method, and by realtimeSearcher being replaced,
// it will be closed.
newSearcher.incref();
synchronized (searcherLock) {
// closed, clean up the new searcher and bail.
if (isClosed()) {
// once for caller since we're not returning it
newSearcher.decref();
// once for ourselves since it won't be "replaced"
newSearcher.decref();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
}
// Replace the previous realtime searcher, releasing its reference.
if (realtimeSearcher != null) {
realtimeSearcher.decref();
}
realtimeSearcher = newSearcher;
searcherList.add(realtimeSearcher);
}
return newSearcher;
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error opening new searcher", e);
} finally {
openSearcherLock.unlock();
// Release the reference taken on the previous realtime searcher above.
if (newestSearcher != null) {
newestSearcher.decref();
}
}
}
Example usage of org.apache.solr.search.SolrIndexSearcher in the Apache lucene-solr project: class QuerySenderListener, method newSearcher().
/**
 * Fires the configured warming queries ("queries" init arg) against the newly
 * opened searcher, then reads back each returned DocList's stored documents to
 * warm the OS disk cache and any Solr document cache.
 */
@Override
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
// Effectively-final alias so the anonymous request below can capture it.
final SolrIndexSearcher searcher = newSearcher;
log.info("QuerySenderListener sending requests to " + newSearcher);
List<NamedList> allLists = (List<NamedList>) getArgs().get("queries");
// Nothing configured to warm with.
if (allLists == null)
return;
// Only install our own SolrRequestInfo if this thread doesn't already have one
// (it could have been transferred from the parent thread).
boolean createNewReqInfo = SolrRequestInfo.getRequestInfo() == null;
for (NamedList nlst : allLists) {
SolrQueryRequest req = null;
try {
// bind the request to a particular searcher (the newSearcher)
NamedList params = addEventParms(currentSearcher, nlst);
// for this, we default to distrib = false
if (params.get(DISTRIB) == null) {
params.add(DISTRIB, false);
}
// Anonymous subclass: pin the request to the new searcher and make close()
// a no-op (the searcher's lifecycle is managed by the core, not this request).
req = new LocalSolrQueryRequest(getCore(), params) {
@Override
public SolrIndexSearcher getSearcher() {
return searcher;
}
@Override
public void close() {
}
};
SolrQueryResponse rsp = new SolrQueryResponse();
if (createNewReqInfo) {
// SolrRequerstInfo for this thread could have been transferred from the parent
// thread.
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
}
getCore().execute(getCore().getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);
// Retrieve the Document instances (not just the ids) to warm
// the OS disk cache, and any Solr document cache. Only the top
// level values in the NamedList are checked for DocLists.
NamedList values = rsp.getValues();
for (int i = 0; i < values.size(); i++) {
Object o = values.getVal(i);
if (o instanceof ResultContext) {
o = ((ResultContext) o).getDocList();
}
if (o instanceof DocList) {
DocList docs = (DocList) o;
// Fetch each stored document; the value is discarded — only the read matters.
for (DocIterator iter = docs.iterator(); iter.hasNext(); ) {
newSearcher.doc(iter.nextDoc());
}
}
}
} catch (Exception e) {
// do nothing... we want to continue with the other requests.
// the failure should have already been logged.
} finally {
// close() is a no-op here, but call it for symmetry / future-proofing.
if (req != null)
req.close();
// Only clear the SolrRequestInfo we installed ourselves.
if (createNewReqInfo)
SolrRequestInfo.clearRequestInfo();
}
}
log.info("QuerySenderListener done.");
}
Aggregations