use of org.apache.solr.update.UpdateLog in project lucene-solr by apache.
the class TestStressRecovery method testStressRecovery.
// This version simulates updates coming from the leader and sometimes being reordered
// and tests the ability to buffer updates and apply them later
@Test
public void testStressRecovery() throws Exception {
assumeFalse("FIXME: This test is horribly slow sometimes on Windows!", Constants.WINDOWS);
clearIndex();
assertU(commit());
final int commitPercent = 5 + random().nextInt(10);
// what percent of the commits are soft
final int softCommitPercent = 30 + random().nextInt(75);
final int deletePercent = 4 + random().nextInt(25);
final int deleteByQueryPercent = random().nextInt(5);
final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
// fewer write threads to give recovery thread more of a chance
int nWriteThreads = 2 + random().nextInt(10);
final int maxConcurrentCommits = nWriteThreads;
// query variables
final int percentRealtimeQuery = 75;
final int percentGetLatestVersions = random().nextInt(4);
// number of recovery loops to perform
final AtomicLong operations = new AtomicLong(atLeast(100));
// fewer read threads to give writers more of a chance
int nReadThreads = 2 + random().nextInt(10);
initModel(ndocs);
final AtomicInteger numCommitting = new AtomicInteger();
List<Thread> threads = new ArrayList<>();
final AtomicLong testVersion = new AtomicLong(0);
final UpdateHandler uHandler = h.getCore().getUpdateHandler();
final UpdateLog uLog = uHandler.getUpdateLog();
final VersionInfo vInfo = uLog.getVersionInfo();
final Object stateChangeLock = new Object();
this.visibleModel = model;
final Semaphore[] writePermissions = new Semaphore[nWriteThreads];
for (int i = 0; i < nWriteThreads; i++) writePermissions[i] = new Semaphore(Integer.MAX_VALUE, false);
final Semaphore readPermission = new Semaphore(Integer.MAX_VALUE, false);
for (int i = 0; i < nWriteThreads; i++) {
final int threadNum = i;
Thread thread = new Thread("WRITER" + i) {
Random rand = new Random(random().nextInt());
Semaphore writePermission = writePermissions[threadNum];
@Override
public void run() {
try {
while (operations.get() > 0) {
writePermission.acquire();
int oper = rand.nextInt(100);
if (oper < commitPercent) {
if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
Map<Integer, DocInfo> newCommittedModel;
long version;
synchronized (globalLock) {
// take a snapshot
newCommittedModel = new HashMap<>(model);
version = snapshotCount++;
}
synchronized (stateChangeLock) {
// These commits won't take effect if we are in recovery mode,
// so change the version to -1 so we won't update our model.
if (uLog.getState() != UpdateLog.State.ACTIVE)
version = -1;
if (rand.nextInt(100) < softCommitPercent) {
verbose("softCommit start");
assertU(TestHarness.commit("softCommit", "true"));
verbose("softCommit end");
} else {
verbose("hardCommit start");
assertU(commit());
verbose("hardCommit end");
}
}
synchronized (globalLock) {
// install this model only if we are not in recovery mode.
if (version >= committedModelClock) {
if (VERBOSE) {
verbose("installing new committedModel version=" + committedModelClock);
}
committedModel = newCommittedModel;
committedModelClock = version;
}
}
}
numCommitting.decrementAndGet();
continue;
}
int id;
if (rand.nextBoolean()) {
id = rand.nextInt(ndocs);
} else {
// reuse the last ID half of the time to force more race conditions
id = lastId;
}
// set the lastId before we actually change it sometimes to try and
// uncover more race conditions between writing and reading
boolean before = rand.nextBoolean();
if (before) {
lastId = id;
}
DocInfo info = model.get(id);
long val = info.val;
long nextVal = Math.abs(val) + 1;
// the version we set on the update should determine who wins
// These versions are not derived from an actual leader update handler, and hence this
// test may need to change depending on how we handle version numbers.
long version = testVersion.incrementAndGet();
// yield after getting the next version to increase the odds of updates happening out of order
if (rand.nextBoolean())
Thread.yield();
if (oper < commitPercent + deletePercent) {
verbose("deleting id", id, "val=", nextVal, "version", version);
Long returnedVersion = deleteAndGetVersion(Integer.toString(id), params("_version_", Long.toString(-version), DISTRIB_UPDATE_PARAM, FROM_LEADER));
// TODO: returning versions for these types of updates is redundant,
// but if we do return one, it had better be equal
if (returnedVersion != null) {
assertEquals(-version, returnedVersion.longValue());
}
// only update model if the version is newer
synchronized (model) {
DocInfo currInfo = model.get(id);
if (Math.abs(version) > Math.abs(currInfo.version)) {
model.put(id, new DocInfo(version, -nextVal));
}
}
verbose("deleting id", id, "val=", nextVal, "version", version, "DONE");
} else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
verbose("deleteByQuery id", id, "val=", nextVal, "version", version);
Long returnedVersion = deleteByQueryAndGetVersion("id:" + Integer.toString(id), params("_version_", Long.toString(-version), DISTRIB_UPDATE_PARAM, FROM_LEADER));
// TODO: returning versions for these types of updates is redundant,
// but if we do return one, it had better be equal
if (returnedVersion != null) {
assertEquals(-version, returnedVersion.longValue());
}
// only update model if the version is newer
synchronized (model) {
DocInfo currInfo = model.get(id);
if (Math.abs(version) > Math.abs(currInfo.version)) {
model.put(id, new DocInfo(version, -nextVal));
}
}
verbose("deleteByQuery id", id, "val=", nextVal, "version", version, "DONE");
} else {
verbose("adding id", id, "val=", nextVal, "version", version);
Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal), "_version_", Long.toString(version)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
if (returnedVersion != null) {
assertEquals(version, returnedVersion.longValue());
}
// only update model if the version is newer
synchronized (model) {
DocInfo currInfo = model.get(id);
if (version > currInfo.version) {
model.put(id, new DocInfo(version, nextVal));
}
}
if (VERBOSE) {
verbose("adding id", id, "val=", nextVal, "version", version, "DONE");
}
}
if (!before) {
lastId = id;
}
}
} catch (Throwable e) {
operations.set(-1L);
throw new RuntimeException(e);
}
}
};
threads.add(thread);
}
for (int i = 0; i < nReadThreads; i++) {
Thread thread = new Thread("READER" + i) {
Random rand = new Random(random().nextInt());
@Override
public void run() {
try {
while (operations.get() > 0) {
// throttle reads (don't completely stop)
readPermission.tryAcquire(10, TimeUnit.MILLISECONDS);
// bias toward a recently changed doc
int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);
// when indexing, we update the index, then the model
// so when querying, we should first check the model, and then the index
boolean realTime = rand.nextInt(100) < percentRealtimeQuery;
DocInfo info;
if (realTime) {
info = visibleModel.get(id);
} else {
synchronized (globalLock) {
info = committedModel.get(id);
}
}
if (VERBOSE) {
verbose("querying id", id);
}
SolrQueryRequest sreq;
if (realTime) {
sreq = req("wt", "json", "qt", "/get", "ids", Integer.toString(id));
} else {
sreq = req("wt", "json", "q", "id:" + Integer.toString(id), "omitHeader", "true");
}
String response = h.query(sreq);
Map rsp = (Map) ObjectBuilder.fromJSON(response);
List doclist = (List) (((Map) rsp.get("response")).get("docs"));
if (doclist.size() == 0) {
// there's no info we can get back with a delete, so not much we can check without further synchronization
} else {
assertEquals(1, doclist.size());
long foundVal = (Long) (((Map) doclist.get(0)).get(FIELD));
long foundVer = (Long) (((Map) doclist.get(0)).get("_version_"));
if (foundVer < Math.abs(info.version) || (foundVer == info.version && foundVal != info.val)) {
// if the version matches, the val must
verbose("ERROR, id=", id, "found=", response, "model", info);
assertTrue(false);
}
}
if (rand.nextInt(100) < percentGetLatestVersions) {
getLatestVersions();
// TODO: some sort of validation that the latest version is >= the latest version we added?
}
}
} catch (Throwable e) {
operations.set(-1L);
throw new RuntimeException(e);
}
}
};
threads.add(thread);
}
for (Thread thread : threads) {
thread.start();
}
int bufferedAddsApplied = 0;
do {
assertTrue(uLog.getState() == UpdateLog.State.ACTIVE);
// before we start buffering updates, we want to point
// visibleModel away from the live model.
visibleModel = new ConcurrentHashMap<>(model);
synchronized (stateChangeLock) {
uLog.bufferUpdates();
}
assertTrue(uLog.getState() == UpdateLog.State.BUFFERING);
// sometimes wait briefly to allow time for writers to write something
if (random().nextBoolean())
Thread.sleep(random().nextInt(10) + 1);
Future<UpdateLog.RecoveryInfo> recoveryInfoF = uLog.applyBufferedUpdates();
if (recoveryInfoF != null) {
UpdateLog.RecoveryInfo recInfo = null;
int writeThreadNumber = 0;
while (recInfo == null) {
try {
// wait a short period of time for recovery to complete (and to give a chance for more writers to concurrently add docs)
recInfo = recoveryInfoF.get(random().nextInt(100 / nWriteThreads), TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
// idle one more write thread
verbose("Operation", operations.get(), "Draining permits for write thread", writeThreadNumber);
writePermissions[writeThreadNumber++].drainPermits();
if (writeThreadNumber >= nWriteThreads) {
// if we hit the end, back up and give a few write permits
writeThreadNumber--;
writePermissions[writeThreadNumber].release(random().nextInt(2) + 1);
}
// throttle readers so they don't steal too much CPU from the recovery thread
readPermission.drainPermits();
}
}
bufferedAddsApplied += recInfo.adds;
}
// put all writers back at full blast
for (Semaphore writePerm : writePermissions) {
// I don't think semaphores check for overflow, so we need to check how many remain
int neededPermits = Integer.MAX_VALUE - writePerm.availablePermits();
if (neededPermits > 0)
writePerm.release(neededPermits);
}
// put back readers at full blast and point back to live model
visibleModel = model;
int neededPermits = Integer.MAX_VALUE - readPermission.availablePermits();
if (neededPermits > 0)
readPermission.release(neededPermits);
verbose("ROUND=", operations.get());
} while (operations.decrementAndGet() > 0);
verbose("bufferedAddsApplied=", bufferedAddsApplied);
for (Thread thread : threads) {
thread.join();
}
}
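The stress test above drives UpdateLog through a small state machine: the log starts ACTIVE, bufferUpdates() moves it to BUFFERING (updates are logged but not applied), and applyBufferedUpdates() replays whatever accumulated and returns the log to ACTIVE. A minimal sketch of that cycle, assuming a live SolrCore; the class name, helper name, and five-second timeout are illustrative, not part of the test:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.solr.core.SolrCore;
import org.apache.solr.update.UpdateLog;

public final class BufferingCycleSketch {

  // Drive one buffer/apply cycle and return how many buffered adds were replayed.
  static int bufferAndReplay(SolrCore core) throws Exception {
    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
    assert ulog.getState() == UpdateLog.State.ACTIVE;

    ulog.bufferUpdates(); // from here on, incoming updates are logged but not applied
    assert ulog.getState() == UpdateLog.State.BUFFERING;

    // ... reordered updates stream in from the leader here ...

    Future<UpdateLog.RecoveryInfo> f = ulog.applyBufferedUpdates();
    if (f == null) {
      return 0; // nothing was buffered, or we were not in the BUFFERING state
    }
    UpdateLog.RecoveryInfo info = f.get(5, TimeUnit.SECONDS); // timeout is illustrative
    return info.adds;
  }
}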
use of org.apache.solr.update.UpdateLog in project lucene-solr by apache.
the class OverseerElectionContext method runLeaderProcess.
/*
* weAreReplacement: has someone else been the leader already?
*/
@Override
void runLeaderProcess(boolean weAreReplacement, int pauseBeforeStart) throws KeeperException, InterruptedException, IOException {
String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
ActionThrottle lt;
try (SolrCore core = cc.getCore(coreName)) {
if (core == null) {
if (cc.isShutDown()) {
return;
} else {
throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
}
}
MDCLoggingContext.setCore(core);
lt = core.getUpdateHandler().getSolrCoreState().getLeaderThrottle();
}
try {
lt.minimumWaitBetweenActions();
lt.markAttemptingAction();
int leaderVoteWait = cc.getZkController().getLeaderVoteWait();
log.debug("Running the leader process for shard={} and weAreReplacement={} and leaderVoteWait={}", shardId, weAreReplacement, leaderVoteWait);
// clear the leader in clusterstate
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(), ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection);
Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(m));
boolean allReplicasInLine = false;
if (!weAreReplacement) {
allReplicasInLine = waitForReplicasToComeUp(leaderVoteWait);
} else {
allReplicasInLine = areAllReplicasParticipating();
}
if (isClosed) {
// Solr is shutting down or the ZooKeeper session expired while waiting for replicas. If the latter,
// we cannot be sure we are still the leader, so we should bail out. The OnReconnect handler will
// re-register the cores and handle a new leadership election.
return;
}
Replica.Type replicaType;
try (SolrCore core = cc.getCore(coreName)) {
if (core == null) {
if (!zkController.getCoreContainer().isShutDown()) {
cancelElection();
throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
} else {
return;
}
}
replicaType = core.getCoreDescriptor().getCloudDescriptor().getReplicaType();
// should I be leader?
if (weAreReplacement && !shouldIBeLeader(leaderProps, core, weAreReplacement)) {
rejoinLeaderElection(core);
return;
}
log.info("I may be the new leader - try and sync");
// we are going to attempt to be the leader
// first cancel any current recovery
core.getUpdateHandler().getSolrCoreState().cancelRecovery();
if (weAreReplacement) {
// wait a moment for any floating updates to finish
try {
Thread.sleep(2500);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, e);
}
}
PeerSync.PeerSyncResult result = null;
boolean success = false;
try {
result = syncStrategy.sync(zkController, core, leaderProps, weAreReplacement);
success = result.isSuccess();
} catch (Exception e) {
SolrException.log(log, "Exception while trying to sync", e);
result = PeerSync.PeerSyncResult.failure();
}
UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
if (!success) {
boolean hasRecentUpdates = false;
if (ulog != null) {
// TODO: we could optimize this if necessary
try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
hasRecentUpdates = !recentUpdates.getVersions(1).isEmpty();
}
}
if (!hasRecentUpdates) {
// we failed sync, but we have no versions - we can't sync in that case;
// we were active before, so become leader anyway if no one else has any versions either
if (result.getOtherHasVersions().orElse(false)) {
log.info("We failed sync, but we have no versions - we can't sync in that case. But others have some versions, so we should not become leader");
success = false;
} else {
log.info("We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway");
success = true;
}
}
}
// solrcloud_debug
if (log.isDebugEnabled()) {
try {
RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
try {
log.debug(core.getCoreContainer().getZkController().getNodeName() + " synched " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
} finally {
searchHolder.decref();
}
} catch (Exception e) {
log.error("Error in solrcloud_debug block", e);
}
}
if (!success) {
rejoinLeaderElection(core);
return;
}
}
boolean isLeader = true;
if (!isClosed) {
try {
// we must check LIR before registering as leader
checkLIR(coreName, allReplicasInLine);
if (replicaType == Replica.Type.TLOG) {
// stop replicating from the old leader
zkController.stopReplicationFromLeader(coreName);
if (weAreReplacement) {
try (SolrCore core = cc.getCore(coreName)) {
Future<UpdateLog.RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().recoverFromCurrentLog();
if (future != null) {
log.info("Replaying tlog before become new leader");
future.get();
} else {
log.info("New leader does not have old tlog to replay");
}
}
}
}
super.runLeaderProcess(weAreReplacement, 0);
try (SolrCore core = cc.getCore(coreName)) {
if (core != null) {
core.getCoreDescriptor().getCloudDescriptor().setLeader(true);
publishActiveIfRegisteredAndNotActive(core);
} else {
return;
}
}
log.info("I am the new leader: " + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
// we made it as leader - send any recovery requests we need to
syncStrategy.requestRecoveries();
} catch (Exception e) {
isLeader = false;
SolrException.log(log, "There was a problem trying to register as the leader", e);
try (SolrCore core = cc.getCore(coreName)) {
if (core == null) {
log.debug("SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
return;
}
core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
// we could not publish ourselves as leader - try and rejoin election
rejoinLeaderElection(core);
}
}
if (isLeader) {
// check for any replicas in my shard that were set to down by the previous leader
try {
startLeaderInitiatedRecoveryOnReplicas(coreName);
} catch (Exception exc) {
// don't want leader election to fail because of
// an error trying to tell others to recover
}
}
} else {
cancelElection();
}
} finally {
MDCLoggingContext.clear();
}
}
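Two UpdateLog idioms in the election flow above are worth isolating: peeking at the most recent versions with getRecentUpdates() (the handle is AutoCloseable and must be closed so the tlog references it holds are released), and replaying the current tlog with recoverFromCurrentLog() before a TLOG replica assumes leadership. A hedged sketch under those assumptions; the class and method names are ours:

import java.io.IOException;
import java.util.concurrent.Future;

import org.apache.solr.core.SolrCore;
import org.apache.solr.update.UpdateLog;

public final class LeaderUlogSketch {

  // Mirrors the hasRecentUpdates check above: does the tlog hold at least one version?
  static boolean hasRecentUpdates(UpdateLog ulog) throws IOException {
    if (ulog == null) return false;
    // RecentUpdates is AutoCloseable; closing releases the tlog references it holds
    try (UpdateLog.RecentUpdates recent = ulog.getRecentUpdates()) {
      return !recent.getVersions(1).isEmpty();
    }
  }

  // Mirrors the TLOG-replica branch above: replay the current tlog before acting as leader.
  static void replayBeforeLeadership(SolrCore core) throws Exception {
    Future<UpdateLog.RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().recoverFromCurrentLog();
    if (future != null) {
      future.get(); // block until replay completes; null means there was no old tlog to replay
    }
  }
}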
use of org.apache.solr.update.UpdateLog in project lucene-solr by apache.
the class CdcrRequestHandler method handleQueuesAction.
private void handleQueuesAction(SolrQueryRequest req, SolrQueryResponse rsp) {
NamedList hosts = new NamedList();
for (CdcrReplicatorState state : replicatorManager.getReplicatorStates()) {
NamedList queueStats = new NamedList();
CdcrUpdateLog.CdcrLogReader logReader = state.getLogReader();
if (logReader == null) {
String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
String shard = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
log.warn("The log reader for target collection {} is not initialised @ {}:{}", state.getTargetCollection(), collectionName, shard);
queueStats.add(CdcrParams.QUEUE_SIZE, -1L);
} else {
queueStats.add(CdcrParams.QUEUE_SIZE, logReader.getNumberOfRemainingRecords());
}
queueStats.add(CdcrParams.LAST_TIMESTAMP, state.getTimestampOfLastProcessedOperation());
if (hosts.get(state.getZkHost()) == null) {
hosts.add(state.getZkHost(), new NamedList());
}
((NamedList) hosts.get(state.getZkHost())).add(state.getTargetCollection(), queueStats);
}
rsp.add(CdcrParams.QUEUES, hosts);
UpdateLog updateLog = core.getUpdateHandler().getUpdateLog();
rsp.add(CdcrParams.TLOG_TOTAL_SIZE, updateLog.getTotalLogsSize());
rsp.add(CdcrParams.TLOG_TOTAL_COUNT, updateLog.getTotalLogsNumber());
rsp.add(CdcrParams.UPDATE_LOG_SYNCHRONIZER, updateLogSynchronizer.isStarted() ? CdcrParams.ProcessState.STARTED.toLower() : CdcrParams.ProcessState.STOPPED.toLower());
}
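Beyond the per-target queue stats, the handler reports two whole-log numbers taken directly from UpdateLog: total tlog size on disk and total tlog file count. A minimal sketch of just that part, assuming a SolrCore reference; the key names below are illustrative stand-ins for the CdcrParams constants:

import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.update.UpdateLog;

public final class TlogStatsSketch {

  // Collect the two whole-log stats the handler adds to the response above.
  static NamedList<Object> tlogStats(SolrCore core) {
    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
    NamedList<Object> stats = new NamedList<>();
    stats.add("tlogTotalSize", ulog.getTotalLogsSize());     // bytes across all tlog files
    stats.add("tlogTotalCount", ulog.getTotalLogsNumber());  // number of tlog files
    return stats;
  }
}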
use of org.apache.solr.update.UpdateLog in project lucene-solr by apache.
the class RealTimeGetComponent method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
SolrParams params = req.getParams();
CloudDescriptor cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
if (cloudDesc != null) {
Replica.Type replicaType = cloudDesc.getReplicaType();
if (replicaType != null) {
if (replicaType == Replica.Type.PULL) {
throw new SolrException(ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support these type of requests", cloudDesc.getCoreNodeName(), Replica.Type.PULL));
}
// non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
}
}
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
// This seems rather kludgey; maybe there is a better way to indicate
// that a replica can support handling version ranges
String val = params.get("checkCanHandleVersionRanges");
if (val != null) {
rb.rsp.add("canHandleVersionRanges", true);
return;
}
val = params.get("getFingerprint");
if (val != null) {
processGetFingeprint(rb);
return;
}
val = params.get("getVersions");
if (val != null) {
processGetVersions(rb);
return;
}
val = params.get("getUpdates");
if (val != null) {
// solrcloud_debug
if (log.isDebugEnabled()) {
try {
RefCounted<SolrIndexSearcher> searchHolder = req.getCore().getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
try {
log.debug(req.getCore().getCoreContainer().getZkController().getNodeName() + " min count to sync to (from most recent searcher view) " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
} finally {
searchHolder.decref();
}
} catch (Exception e) {
log.debug("Error in solrcloud_debug block", e);
}
}
processGetUpdates(rb);
return;
}
val = params.get("getInputDocument");
if (val != null) {
processGetInputDocument(rb);
return;
}
final IdsRequsted reqIds = IdsRequsted.parseParams(req);
if (reqIds.allIds.isEmpty()) {
return;
}
// parse any existing filters
try {
String[] fqs = req.getParams().getParams(CommonParams.FQ);
if (fqs != null && fqs.length != 0) {
List<Query> filters = rb.getFilters();
// if filters already exists, make a copy instead of modifying the original
filters = filters == null ? new ArrayList<Query>(fqs.length) : new ArrayList<>(filters);
for (String fq : fqs) {
if (fq != null && fq.trim().length() != 0) {
QParser fqp = QParser.getParser(fq, req);
filters.add(fqp.getQuery());
}
}
if (!filters.isEmpty()) {
rb.setFilters(filters);
}
}
} catch (SyntaxError e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
final SolrCore core = req.getCore();
SchemaField idField = core.getLatestSchema().getUniqueKeyField();
FieldType fieldType = idField.getType();
SolrDocumentList docList = new SolrDocumentList();
UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
SearcherInfo searcherInfo = new SearcherInfo(core);
// this is initialized & set on the context *after* any searcher (re-)opening
ResultContext resultContext = null;
final DocTransformer transformer = rsp.getReturnFields().getTransformer();
// true in any situation where we have to use a realtime searcher rather than returning docs
// directly from the UpdateLog
// (if we have filters, we need to check those against the indexed form of the doc)
final boolean mustUseRealtimeSearcher = (rb.getFilters() != null) || ((null != transformer) && transformer.needsSolrIndexSearcher());
try {
BytesRefBuilder idBytes = new BytesRefBuilder();
for (String idStr : reqIds.allIds) {
fieldType.readableToIndexed(idStr, idBytes);
if (ulog != null) {
Object o = ulog.lookup(idBytes.get());
if (o != null) {
// should currently be a List<Oper,Ver,Doc/Id>
List entry = (List) o;
assert entry.size() >= 3;
int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
switch(oper) {
case UpdateLog.UPDATE_INPLACE: // fall through to ADD
case UpdateLog.ADD:
if (mustUseRealtimeSearcher) {
// close handles to current searchers & result context
searcherInfo.clear();
resultContext = null;
// force open a new realtime searcher
ulog.openRealtimeSearcher();
// pretend we never found this record and fall through to use the searcher
o = null;
break;
}
SolrDocument doc;
if (oper == UpdateLog.ADD) {
doc = toSolrDoc((SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
} else if (oper == UpdateLog.UPDATE_INPLACE) {
assert entry.size() == 5;
// For in-place update case, we have obtained the partial document till now. We need to
// resolve it to a full document to be returned to the user.
doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument) entry.get(entry.size() - 1), entry, null);
if (doc == null) {
// document has been deleted as the resolve was going on
break;
}
} else {
throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
}
if (transformer != null) {
// unknown docID
transformer.transform(doc, -1, 0);
}
docList.add(doc);
break;
case UpdateLog.DELETE:
break;
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
}
if (o != null)
continue;
}
}
// didn't find it in the update log, so it should be in the newest searcher opened
searcherInfo.init();
// don't bother with ResultContext yet, we won't need it if doc doesn't match filters
int docid = -1;
long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
if (segAndId >= 0) {
int segid = (int) segAndId;
LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
docid = segid + ctx.docBase;
if (rb.getFilters() != null) {
for (Query raw : rb.getFilters()) {
Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
Scorer scorer = searcherInfo.getSearcher().createWeight(q, false, 1f).scorer(ctx);
if (scorer == null || segid != scorer.iterator().advance(segid)) {
// filter doesn't match.
docid = -1;
break;
}
}
}
}
if (docid < 0)
continue;
Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
if (null != transformer) {
if (null == resultContext) {
// either first pass, or we've re-opened searcher - either way now we setContext
resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
transformer.setContext(resultContext);
}
transformer.transform(doc, docid, 0);
}
docList.add(doc);
}
} finally {
searcherInfo.clear();
}
addDocListToResponse(rb, docList);
}
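The heart of the realtime-get path above is ulog.lookup(idBytes): the returned entry is a List whose flags slot encodes the operation and whose last element holds the document (or the deleted id). A reduced sketch of that decode, assuming the caller already has the indexed form of the id and skipping the realtime-searcher fallback; the class and helper names are ours:

import java.util.List;

import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.update.UpdateLog;

public final class UlogLookupSketch {

  // Return the uncommitted document for an id straight from the tlog map, or null.
  static SolrInputDocument lookupInTlog(UpdateLog ulog, BytesRef indexedId) {
    Object o = ulog.lookup(indexedId);
    if (o == null) return null; // not in the tlog; fall back to a searcher

    List entry = (List) o; // [flags, version, ..., document or id]
    int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
    switch (oper) {
      case UpdateLog.UPDATE_INPLACE: // caution: a partial document; the full RTG path resolves it first
      case UpdateLog.ADD:
        return (SolrInputDocument) entry.get(entry.size() - 1);
      case UpdateLog.DELETE:
        return null; // deleted in the tlog
      default:
        throw new IllegalStateException("Unknown tlog operation: " + oper);
    }
  }
}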
use of org.apache.solr.update.UpdateLog in project lucene-solr by apache.
the class RequestApplyUpdatesOp method execute.
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
SolrParams params = it.req.getParams();
String cname = params.get(CoreAdminParams.NAME, "");
CoreAdminOperation.log().info("Applying buffered updates on core: " + cname);
CoreContainer coreContainer = it.handler.coreContainer;
try (SolrCore core = coreContainer.getCore(cname)) {
if (core == null)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core [" + cname + "] not found");
UpdateLog updateLog = core.getUpdateHandler().getUpdateLog();
if (updateLog.getState() != UpdateLog.State.BUFFERING) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Core " + cname + " not in buffering state");
}
Future<UpdateLog.RecoveryInfo> future = updateLog.applyBufferedUpdates();
if (future == null) {
CoreAdminOperation.log().info("No buffered updates available. core=" + cname);
it.rsp.add("core", cname);
it.rsp.add("status", "EMPTY_BUFFER");
return;
}
UpdateLog.RecoveryInfo report = future.get();
if (report.failed) {
SolrException.log(CoreAdminOperation.log(), "Replay failed");
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Replay failed");
}
coreContainer.getZkController().publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
it.rsp.add("core", cname);
it.rsp.add("status", "BUFFER_APPLIED");
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
CoreAdminOperation.log().warn("Recovery was interrupted", e);
} catch (Exception e) {
if (e instanceof SolrException)
throw (SolrException) e;
else
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not apply buffered updates", e);
} finally {
if (it.req != null)
it.req.close();
}
}
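This op is normally reached over HTTP through the CoreAdmin API with action=REQUESTAPPLYUPDATES. A hedged SolrJ sketch of invoking it from a client, assuming a node base URL such as http://localhost:8983/solr; raw params plus QueryRequest.setPath avoid depending on any dedicated request wrapper:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public final class ApplyUpdatesClientSketch {

  // Ask a node to replay the buffered updates of one of its cores.
  static NamedList<Object> requestApplyUpdates(String baseUrl, String coreName) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
    params.set(CoreAdminParams.NAME, coreName);

    QueryRequest req = new QueryRequest(params);
    req.setPath("/admin/cores"); // CoreAdmin endpoint rather than a search handler

    try (SolrClient client = new HttpSolrClient.Builder(baseUrl).build()) {
      return client.request(req);
    }
  }
}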