Use of org.apache.solr.util.RefCounted in project lucene-solr by apache.
The class TestTlogReplica, method testOnlyLeaderIndexes.
public void testOnlyLeaderIndexes() throws Exception {
  createAndWaitForCollection(1, 0, 2, 0);
  CloudSolrClient cloudClient = cluster.getSolrClient();
  new UpdateRequest()
      .add(sdoc("id", "1"))
      .add(sdoc("id", "2"))
      .add(sdoc("id", "3"))
      .add(sdoc("id", "4"))
      .process(cloudClient, collectionName);
  {
    UpdateHandler updateHandler = getSolrCore(true).get(0).getUpdateHandler();
    RefCounted<IndexWriter> iwRef = updateHandler.getSolrCoreState().getIndexWriter(null);
    assertTrue("IndexWriter at leader must see updates", iwRef.get().hasUncommittedChanges());
    iwRef.decref();
  }
  for (SolrCore solrCore : getSolrCore(false)) {
    RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
    assertFalse("IndexWriter at replicas must not see updates", iwRef.get().hasUncommittedChanges());
    iwRef.decref();
  }
  checkRTG(1, 4, cluster.getJettySolrRunners());
  new UpdateRequest().deleteById("1").deleteByQuery("id:2").process(cloudClient, collectionName);
  // The DBQ is not processed at the replicas, so we can still get doc2 and the other docs by RTG
  checkRTG(2, 4, getSolrRunner(false));
  new UpdateRequest().commit(cloudClient, collectionName);
  waitForNumDocsInAllActiveReplicas(2);
  // The update log rolls over
  for (SolrCore solrCore : getSolrCore(false)) {
    UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
    assertFalse(updateLog.hasUncommittedChanges());
  }
  // The update log copies over old updates
  for (int i = 15; i <= 150; i++) {
    cloudClient.add(collectionName, sdoc("id", String.valueOf(i)));
    if (random().nextInt(100) < 15 && i != 150) {
      cloudClient.commit(collectionName);
    }
  }
  checkRTG(120, 150, cluster.getJettySolrRunners());
  waitForReplicasCatchUp(20);
}
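Both the leader check and the replica loop above follow the same borrow/return contract: getIndexWriter(null) hands out a RefCounted<IndexWriter>, get() exposes the shared writer, and decref() releases the reference. A minimal sketch of the defensive form of that idiom; the helper name hasPendingChanges is hypothetical, and the try/finally guard (which the test omits) makes the release unconditional:

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
import org.apache.solr.update.UpdateHandler;
import org.apache.solr.util.RefCounted;

// Sketch of the borrow/return idiom used in the test above. The helper name
// hasPendingChanges is hypothetical; the RefCounted calls match the test.
static boolean hasPendingChanges(UpdateHandler updateHandler) throws IOException {
  RefCounted<IndexWriter> iwRef = updateHandler.getSolrCoreState().getIndexWriter(null);
  try {
    return iwRef.get().hasUncommittedChanges(); // borrow the shared writer
  } finally {
    iwRef.decref(); // release the reference even if the check throws
  }
}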
Use of org.apache.solr.util.RefCounted in project lucene-solr by apache.
The class MergeIndexesOp, method execute.
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
  SolrParams params = it.req.getParams();
  String cname = params.required().get(CoreAdminParams.CORE);
  SolrCore core = it.handler.coreContainer.getCore(cname);
  SolrQueryRequest wrappedReq = null;
  List<SolrCore> sourceCores = Lists.newArrayList();
  List<RefCounted<SolrIndexSearcher>> searchers = Lists.newArrayList();
  // stores readers created from indexDir param values
  List<DirectoryReader> readersToBeClosed = Lists.newArrayList();
  Map<Directory, Boolean> dirsToBeReleased = new HashMap<>();
  if (core != null) {
    try {
      String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
      if (dirNames == null || dirNames.length == 0) {
        String[] sources = params.getParams("srcCore");
        if (sources == null || sources.length == 0)
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "At least one indexDir or srcCore must be specified");
        for (int i = 0; i < sources.length; i++) {
          String source = sources[i];
          SolrCore srcCore = it.handler.coreContainer.getCore(source);
          if (srcCore == null)
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core: " + source + " does not exist");
          sourceCores.add(srcCore);
        }
      } else {
        DirectoryFactory dirFactory = core.getDirectoryFactory();
        for (int i = 0; i < dirNames.length; i++) {
          boolean markAsDone = false;
          if (dirFactory instanceof CachingDirectoryFactory) {
            if (!((CachingDirectoryFactory) dirFactory).getLivePaths().contains(dirNames[i])) {
              markAsDone = true;
            }
          }
          Directory dir = dirFactory.get(dirNames[i], DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
          dirsToBeReleased.put(dir, markAsDone);
          // TODO: why doesn't this use the IR factory? what is going on here?
          readersToBeClosed.add(DirectoryReader.open(dir));
        }
      }
      List<DirectoryReader> readers = null;
      if (readersToBeClosed.size() > 0) {
        readers = readersToBeClosed;
      } else {
        readers = Lists.newArrayList();
        for (SolrCore solrCore : sourceCores) {
          // record the searchers so that we can decref
          RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
          searchers.add(searcher);
          readers.add(searcher.get().getIndexReader());
        }
      }
      UpdateRequestProcessorChain processorChain = core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
      wrappedReq = new LocalSolrQueryRequest(core, it.req.getParams());
      UpdateRequestProcessor processor = processorChain.createProcessor(wrappedReq, it.rsp);
      processor.processMergeIndexes(new MergeIndexesCommand(readers, it.req));
    } catch (Exception e) {
      // log and rethrow so that if the finally fails we don't lose the original problem
      log.error("ERROR executing merge:", e);
      throw e;
    } finally {
      for (RefCounted<SolrIndexSearcher> searcher : searchers) {
        if (searcher != null)
          searcher.decref();
      }
      for (SolrCore solrCore : sourceCores) {
        if (solrCore != null)
          solrCore.close();
      }
      IOUtils.closeWhileHandlingException(readersToBeClosed);
      Set<Map.Entry<Directory, Boolean>> entries = dirsToBeReleased.entrySet();
      for (Map.Entry<Directory, Boolean> entry : entries) {
        DirectoryFactory dirFactory = core.getDirectoryFactory();
        Directory dir = entry.getKey();
        boolean markAsDone = entry.getValue();
        if (markAsDone) {
          dirFactory.doneWithDirectory(dir);
        }
        dirFactory.release(dir);
      }
      if (wrappedReq != null)
        wrappedReq.close();
      core.close();
    }
  }
}
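When merging from srcCore values, the op borrows each source core's live searcher and keeps every RefCounted handle in a list so the finally block can release them all, however the merge ends. A compact sketch of that borrow-many/release-all shape; the method names mergeFromCores and mergeWith are illustrative, not the op's own:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.DirectoryReader;
import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;

// Illustrative sketch: collect readers from several cores, then release all
// searcher references in one place, mirroring the op's finally block.
static void mergeFromCores(List<SolrCore> sourceCores) throws Exception {
  List<RefCounted<SolrIndexSearcher>> searchers = new ArrayList<>();
  try {
    List<DirectoryReader> readers = new ArrayList<>();
    for (SolrCore solrCore : sourceCores) {
      RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
      searchers.add(searcher); // remember the handle so we can decref later
      readers.add(searcher.get().getIndexReader());
    }
    mergeWith(readers); // hypothetical: hand the readers to the merge
  } finally {
    for (RefCounted<SolrIndexSearcher> searcher : searchers) {
      searcher.decref(); // the readers stay valid only while these are held
    }
  }
}

static void mergeWith(List<DirectoryReader> readers) { /* stand-in for the real merge */ }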
Use of org.apache.solr.util.RefCounted in project lucene-solr by apache.
The class SolrCore, method openNewSearcher.
/** Opens a new searcher and returns a RefCounted<SolrIndexSearcher> with its reference incremented.
 *
 * "realtime" means that we need to open quickly for a realtime view of the index, hence don't do any
 * autowarming and add to the _realtimeSearchers queue rather than the _searchers queue (so it won't
 * be used for autowarming by a future normal searcher). A "realtime" searcher will currently never
 * become "registered" (since it currently lacks caching).
 *
 * realtimeSearcher is updated to the latest opened searcher, regardless of the value of "realtime".
 *
 * This method acquires openSearcherLock - do not call with searchLock held!
 */
public RefCounted<SolrIndexSearcher> openNewSearcher(boolean updateHandlerReopens, boolean realtime) {
  if (isClosed()) {
    // catch some errors quicker
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
  }
  SolrIndexSearcher tmp;
  RefCounted<SolrIndexSearcher> newestSearcher = null;
  openSearcherLock.lock();
  try {
    String newIndexDir = getNewIndexDir();
    String indexDirFile = null;
    String newIndexDirFile = null;
    // if it's not a normal near-realtime update, check that the paths haven't changed
    if (!updateHandlerReopens) {
      indexDirFile = getDirectoryFactory().normalize(getIndexDir());
      newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
    }
    synchronized (searcherLock) {
      newestSearcher = realtimeSearcher;
      if (newestSearcher != null) {
        // the matching decref is in the finally block
        newestSearcher.incref();
      }
    }
    if (newestSearcher != null && (updateHandlerReopens || indexDirFile.equals(newIndexDirFile))) {
      DirectoryReader newReader;
      DirectoryReader currentReader = newestSearcher.get().getRawReader();
      // SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);
      RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(null);
      try {
        if (writer != null) {
          // if in NRT mode, open from the writer
          newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true);
        } else {
          // verbose("start reopen without writer, reader=", currentReader);
          newReader = DirectoryReader.openIfChanged(currentReader);
          // verbose("reopen result", newReader);
        }
      } finally {
        if (writer != null) {
          writer.decref();
        }
      }
      if (newReader == null) {
        if (realtime) {
          // if this is a request for a realtime searcher, just return the same searcher
          newestSearcher.incref();
          return newestSearcher;
        } else if (newestSearcher.get().isCachingEnabled() && newestSearcher.get().getSchema() == getLatestSchema()) {
          // absolutely nothing has changed, so we can use the same searcher,
          // but log a message about it to minimize confusion
          newestSearcher.incref();
          log.debug("SolrIndexSearcher has not changed - not re-opening: " + newestSearcher.get().getName());
          return newestSearcher;
        }
        // ELSE: open a new searcher against the old reader...
        currentReader.incRef();
        newReader = currentReader;
      }
      // for now, turn off caches if this is for a realtime reader
      // (caches take a little while to instantiate)
      final boolean useCaches = !realtime;
      final String newName = realtime ? "realtime" : "main";
      tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), newName, newReader, true, useCaches, true, directoryFactory);
    } else {
      if (newReaderCreator != null) {
        // this is set in the constructor if there is a currently open index writer,
        // so that we pick up any uncommitted changes and don't go backwards
        // in time on a core reload
        DirectoryReader newReader = newReaderCreator.call();
        tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
      } else {
        RefCounted<IndexWriter> writer = getSolrCoreState().getIndexWriter(this);
        DirectoryReader newReader = null;
        try {
          newReader = indexReaderFactory.newReader(writer.get(), this);
        } finally {
          writer.decref();
        }
        tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
      }
    }
    List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
    // refcount now at 1
    RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList);
    // Increment the reference again for the "realtimeSearcher" variable. It should be at 2 after.
    // When it's decremented by both the caller of this method and by realtimeSearcher being replaced,
    // it will be closed.
    newSearcher.incref();
    synchronized (searcherLock) {
      // if the core was closed while we were opening, clean up the new searcher and bail
      if (isClosed()) {
        // once for the caller, since we're not returning it
        newSearcher.decref();
        // once for ourselves, since it won't be "replaced"
        newSearcher.decref();
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
      }
      if (realtimeSearcher != null) {
        realtimeSearcher.decref();
      }
      realtimeSearcher = newSearcher;
      searcherList.add(realtimeSearcher);
    }
    return newSearcher;
  } catch (Exception e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error opening new searcher", e);
  } finally {
    openSearcherLock.unlock();
    if (newestSearcher != null) {
      newestSearcher.decref();
    }
  }
}
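The comments around newHolder spell out the ownership model: the holder starts at refcount 1 on behalf of the searcher list, a second incref covers the caller, and the searcher is closed only after both owners have called decref. Assuming RefCounted's usual shape in this codebase (a protected resource field, incref()/decref(), and an abstract close() invoked when the count reaches zero), a toy demonstration of that two-owner lifecycle:

import org.apache.solr.util.RefCounted;

// Toy demonstration (not Solr code) of the two-owner lifecycle described above.
public class RefCountedLifecycleDemo {
  public static void main(String[] args) {
    RefCounted<String> holder = new RefCounted<String>("searcher") {
      @Override
      public void close() {
        // runs once the last reference is released
        System.out.println("closing " + resource);
      }
    };
    holder.incref(); // refcount 1: owned via the searcher list (what newHolder does)
    holder.incref(); // refcount 2: the caller of openNewSearcher
    holder.decref(); // caller is done; still referenced as realtimeSearcher
    holder.decref(); // realtimeSearcher replaced; refcount hits 0, close() runs
  }
}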