use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
the class DirectUpdateHandler2, method commit.
@Override
public void commit(CommitUpdateCommand cmd) throws IOException {
if (cmd.prepareCommit) {
prepareCommit(cmd);
return;
}
if (cmd.optimize) {
optimizeCommands.mark();
} else {
commitCommands.mark();
if (cmd.expungeDeletes)
expungeDeleteCommands.mark();
}
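// getSearcher() hands back, via this one-element array, a Future that completes once the new searcher is registered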
Future[] waitSearcher = null;
if (cmd.waitSearcher) {
waitSearcher = new Future[1];
}
boolean error = true;
try {
// only allow one hard commit to proceed at once
if (!cmd.softCommit) {
solrCoreState.getCommitLock().lock();
}
log.info("start " + cmd);
if (cmd.openSearcher) {
// we can cancel any pending soft commits if this commit will open a new searcher
softCommitTracker.cancelPendingCommit();
}
if (!cmd.softCommit && (cmd.openSearcher || !commitTracker.getOpenSearcher())) {
// cancel a pending hard commit if this commit is of equal or greater "strength"...
// If the autoCommit has openSearcher=true, then this commit must have openSearcher=true
// to cancel.
commitTracker.cancelPendingCommit();
}
RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
try {
IndexWriter writer = iw.get();
if (cmd.optimize) {
if (cmd.maxOptimizeSegments == 1) {
log.warn("Starting optimize... Reading and rewriting the entire index! Use with care.");
} else {
log.warn("Starting optimize... Reading and rewriting a potentially large percent of the entire index, reducing to " + cmd.maxOptimizeSegments + " segments");
}
writer.forceMerge(cmd.maxOptimizeSegments);
} else if (cmd.expungeDeletes) {
log.warn("Starting expungeDeletes... Reading and rewriting segments with enough deletes, potentially the entire index");
writer.forceMergeDeletes();
}
if (!cmd.softCommit) {
synchronized (solrCoreState.getUpdateLock()) {
// sync is currently needed to prevent preCommit from being called between preSoft and postSoft... see postSoft comments.
if (ulog != null)
ulog.preCommit(cmd);
}
if (writer.hasUncommittedChanges()) {
SolrIndexWriter.setCommitData(writer, cmd.getVersion());
writer.commit();
} else {
log.info("No uncommitted changes. Skipping IW.commit.");
}
// SolrCore.verbose("writer.commit() end");
numDocsPending.reset();
callPostCommitCallbacks();
}
} finally {
iw.decref();
}
if (cmd.optimize) {
callPostOptimizeCallbacks();
}
if (cmd.softCommit) {
// ulog.preSoftCommit();
synchronized (solrCoreState.getUpdateLock()) {
if (ulog != null)
ulog.preSoftCommit(cmd);
core.getSearcher(true, false, waitSearcher, true);
if (ulog != null)
ulog.postSoftCommit(cmd);
}
callPostSoftCommitCallbacks();
} else {
synchronized (solrCoreState.getUpdateLock()) {
if (ulog != null)
ulog.preSoftCommit(cmd);
if (cmd.openSearcher) {
core.getSearcher(true, false, waitSearcher);
} else {
// force open a new realtime searcher so realtime-get and versioning code can see the latest
RefCounted<SolrIndexSearcher> searchHolder = core.openNewSearcher(true, true);
searchHolder.decref();
}
if (ulog != null)
ulog.postSoftCommit(cmd);
}
// postCommit currently means a new searcher has also been opened
if (ulog != null)
ulog.postCommit(cmd);
}
if (cmd.softCommit) {
softCommitTracker.didCommit();
} else {
commitTracker.didCommit();
}
log.info("end_commit_flush");
error = false;
} finally {
if (!cmd.softCommit) {
solrCoreState.getCommitLock().unlock();
}
addCommands.reset();
deleteByIdCommands.reset();
deleteByQueryCommands.reset();
if (error) {
numErrors.increment();
numErrorsCumulative.mark();
}
}
// if we are waiting for the searcher to be registered, do it
// outside any synchronized block so that other update operations can proceed.
if (waitSearcher != null && waitSearcher[0] != null) {
try {
waitSearcher[0].get();
} catch (InterruptedException | ExecutionException e) {
SolrException.log(log, e);
}
}
}
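For reference, a minimal sketch of driving this method from user code, assuming a live SolrCore; CommitExample and its commit helper are illustrative names, not Solr API:

import java.io.IOException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.update.CommitUpdateCommand;

// Illustrative helper: issue an explicit commit through the update handler.
public final class CommitExample {
  public static void commit(SolrCore core, boolean soft) throws IOException {
    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
    try {
      CommitUpdateCommand cmd = new CommitUpdateCommand(req, false); // false = not an optimize
      cmd.softCommit = soft;   // true takes the soft-commit branch above and skips IndexWriter.commit()
      cmd.openSearcher = true; // open a new searcher so the commit becomes visible
      cmd.waitSearcher = true; // block until the new searcher is registered
      core.getUpdateHandler().commit(cmd);
    } finally {
      req.close();
    }
  }
}

A soft commit makes recent updates searchable without the expense of a durable IndexWriter commit, which is why the method above handles the two paths so differently.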
use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
the class RecoveryStrategy, method replicate.
private final void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops) throws SolrServerException, IOException {
final String leaderUrl = getReplicateLeaderUrl(leaderprops);
LOG.info("Attempting to replicate from [{}].", leaderUrl);
// send commit
commitOnLeader(leaderUrl);
// use rep handler directly, so we can do this sync rather than async
SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH);
ReplicationHandler replicationHandler = (ReplicationHandler) handler;
if (replicationHandler == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Skipping recovery, no " + ReplicationHandler.PATH + " handler found");
}
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
// we check closed on return
if (isClosed())
return;
boolean success = replicationHandler.doFetch(solrParams, false).getSuccessful();
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
}
// solrcloud_debug
if (LOG.isDebugEnabled()) {
try {
RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
try {
LOG.debug(core.getCoreContainer().getZkController().getNodeName() + " replicated " + searcher.search(new MatchAllDocsQuery(), 1).totalHits + " from " + leaderUrl + " gen:" + (core.getDeletionPolicy().getLatestCommit() != null ? core.getDeletionPolicy().getLatestCommit().getGeneration() : "null") + " data:" + core.getDataDir() + " index:" + core.getIndexDir() + " newIndex:" + core.getNewIndexDir() + " files:" + Arrays.asList(dir.listAll()));
} finally {
core.getDirectoryFactory().release(dir);
searchHolder.decref();
}
} catch (Exception e) {
LOG.debug("Error in solrcloud_debug block", e);
}
}
}
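The load-bearing idiom in the debug block is acquiring a searcher through RefCounted and releasing it in a finally block. A minimal sketch of that pattern, assuming a live SolrCore (visibleDocs is an illustrative name):

import java.io.IOException;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;

// Illustrative helper: count the documents visible to the newest searcher.
static long visibleDocs(SolrCore core) throws IOException {
  RefCounted<SolrIndexSearcher> holder = core.getNewestSearcher(false);
  try {
    SolrIndexSearcher searcher = holder.get();
    return searcher.search(new MatchAllDocsQuery(), 1).totalHits;
  } finally {
    holder.decref(); // always release the reference, or the underlying reader leaks
  }
}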
use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
the class IndexFetcher, method fetchLatestIndex.
/**
* This command downloads all the necessary files from master to install an index commit point. Only changed files are
* downloaded. It also downloads the conf files (if they are modified).
*
* @param forceReplication force a replication in all cases
* @param forceCoreReload force a core reload in all cases
* @return an IndexFetchResult indicating success, failure, or that the slave is already in sync
* @throws IOException if an exception occurs
*/
IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreReload) throws IOException, InterruptedException {
boolean cleanupDone = false;
boolean successfulInstall = false;
markReplicationStart();
Directory tmpIndexDir = null;
String tmpIndex;
Directory indexDir = null;
String indexDirPath;
boolean deleteTmpIdxDir = true;
File tmpTlogDir = null;
if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
// if the last replication was not a success, we force a full replication
// when we are a bit more confident we may want to try a partial replication
// if the error is connection related or something, but we have to be careful
forceReplication = true;
LOG.info("Last replication failed, so I'll force replication");
}
try {
if (fetchFromLeader) {
assert !solrCore.isClosed() : "Replication should be stopped before closing the core";
Replica replica = getLeaderReplica();
CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor();
if (cd.getCoreNodeName().equals(replica.getName())) {
return IndexFetchResult.EXPECTING_NON_LEADER;
}
if (replica.getState() != Replica.State.ACTIVE) {
LOG.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
}
if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
LOG.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
}
if (!replica.getCoreUrl().equals(masterUrl)) {
masterUrl = replica.getCoreUrl();
LOG.info("Updated masterUrl to {}", masterUrl);
// TODO: Do we need to set forceReplication = true?
} else {
LOG.debug("masterUrl didn't change");
}
}
// get the current 'replicatable' index version on the master
NamedList response;
try {
response = getLatestVersion();
} catch (Exception e) {
final String errorMsg = e.toString();
if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
} else {
LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
}
}
long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
long latestGeneration = (Long) response.get(GENERATION);
LOG.info("Master's generation: " + latestGeneration);
LOG.info("Master's version: " + latestVersion);
// TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
if (commit == null) {
// Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
RefCounted<SolrIndexSearcher> searcherRefCounted = null;
try {
searcherRefCounted = solrCore.getNewestSearcher(false);
if (searcherRefCounted == null) {
LOG.warn("No open searcher found - fetch aborted");
return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
}
commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
} finally {
if (searcherRefCounted != null)
searcherRefCounted.decref();
}
}
LOG.info("Slave's generation: " + commit.getGeneration());
LOG.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
if (latestVersion == 0L) {
if (forceReplication && commit.getGeneration() != 0) {
// since we won't get the files for an empty index,
// we just clear ours and commit
LOG.info("New index in Master. Deleting mine...");
RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
try {
iw.get().deleteAll();
} finally {
iw.decref();
}
SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
}
//there is nothing to be replicated
successfulInstall = true;
LOG.debug("Nothing to replicate, master's version is 0");
return IndexFetchResult.MASTER_VERSION_ZERO;
}
// TODO: Should we be comparing timestamps (across machines) here?
if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
//master and slave are already in sync just return
LOG.info("Slave in sync with master.");
successfulInstall = true;
return IndexFetchResult.ALREADY_IN_SYNC;
}
LOG.info("Starting replication process");
// get the list of files first
fetchFileList(latestGeneration);
// this can happen if the commit point is deleted before we fetch the file list.
if (filesToDownload.isEmpty()) {
return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
}
LOG.info("Number of files in latest index in master: " + filesToDownload.size());
if (tlogFilesToDownload != null) {
LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
}
// Create the sync service
fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
// use a synchronized list because the list is read by other threads (to show details)
filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
// If the master's generation is older than the slave's, the indexes are not compatible for an incremental copy:
// a new index directory needs to be created and all the files copied over.
boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion || commit.getGeneration() >= latestGeneration || forceReplication;
String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
String tmpIdxDirName = "index." + timestamp;
tmpIndex = solrCore.getDataDir() + tmpIdxDirName;
tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndex, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
// tmp dir for tlog files
if (tlogFilesToDownload != null) {
tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
}
// current index dir...
indexDirPath = solrCore.getIndexDir();
indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
try {
// Compare each local index file against the master's file list using the size and checksum recorded
// in the metadata. If there is a mismatch for the same index file then we download the entire index again.
if (!isFullCopyNeeded && isIndexStale(indexDir)) {
isFullCopyNeeded = true;
}
if (!isFullCopyNeeded && !fetchFromLeader) {
// a searcher might be using some flushed but not committed segments (because of soft commits),
// so close the existing searcher on the last commit
// and wait until we are able to clean up all unused lucene files
if (solrCore.getCoreContainer().isZooKeeperAware()) {
solrCore.closeSearcher();
}
// rollback and reopen index writer and wait until all unused files
// are successfully deleted
solrCore.getUpdateHandler().newIndexWriter(true);
RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
try {
IndexWriter indexWriter = writer.get();
int c = 0;
indexWriter.deleteUnusedFiles();
while (hasUnusedFiles(indexDir, commit)) {
indexWriter.deleteUnusedFiles();
LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
Thread.sleep(1000);
c++;
if (c >= 30) {
LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
isFullCopyNeeded = true;
break;
}
}
if (c > 0) {
LOG.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
}
} finally {
writer.decref();
}
}
boolean reloadCore = false;
try {
// we have to be careful and do this after we know isFullCopyNeeded won't be flipped
if (!isFullCopyNeeded) {
solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
}
LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
successfulInstall = false;
long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir, tmpIndexDir, latestGeneration);
if (tlogFilesToDownload != null) {
bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
// reload update log
reloadCore = true;
}
final long timeTakenSeconds = getReplicationTimeElapsed();
final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? new Long(bytesDownloaded / timeTakenSeconds) : null);
LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}", isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
if (!modifiedConfFiles.isEmpty()) {
reloadCore = true;
downloadConfFiles(confFilesToDownload, latestGeneration);
if (isFullCopyNeeded) {
successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
if (successfulInstall)
deleteTmpIdxDir = false;
} else {
successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
}
if (tlogFilesToDownload != null) {
// move tlog files and refresh ulog only if we successfully installed a new index
successfulInstall &= moveTlogFiles(tmpTlogDir);
}
if (successfulInstall) {
if (isFullCopyNeeded) {
// let the system know we are changing dir's and the old one may be closed
if (indexDir != null) {
solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
// Cleanup all index files not associated with any *named* snapshot.
solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
}
}
LOG.info("Configuration files are modified, core will be reloaded");
// write to a file the time of replication and the conf files
logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
}
} else {
terminateAndWaitFsyncService();
if (isFullCopyNeeded) {
successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
if (successfulInstall)
deleteTmpIdxDir = false;
} else {
successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
}
if (tlogFilesToDownload != null) {
// move tlog files and refresh ulog only if we successfully installed a new index
successfulInstall &= moveTlogFiles(tmpTlogDir);
}
if (successfulInstall) {
logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
}
}
} finally {
if (!isFullCopyNeeded) {
solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
}
}
// we must reload the core after we open the IW back up
if (successfulInstall && (reloadCore || forceCoreReload)) {
LOG.info("Reloading SolrCore {}", solrCore.getName());
reloadCore();
}
if (successfulInstall) {
if (isFullCopyNeeded) {
// let the system know we are changing dir's and the old one may be closed
if (indexDir != null) {
LOG.info("removing old index directory " + indexDir);
solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
solrCore.getDirectoryFactory().remove(indexDir);
}
}
if (isFullCopyNeeded) {
solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
}
openNewSearcherAndUpdateCommitPoint();
}
if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
cleanupDone = true;
// we try with a full copy of the index
LOG.warn("Replication attempt was not successful - trying a full index replication reloadCore={}", reloadCore);
successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
}
markReplicationStop();
return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
} catch (ReplicationHandlerException e) {
LOG.error("User aborted Replication");
return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
} catch (SolrException e) {
throw e;
} catch (InterruptedException e) {
throw new InterruptedException("Index fetch interrupted");
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
}
} finally {
if (!cleanupDone) {
cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
}
}
}
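The decision at the heart of the method can be distilled as below; this is an illustrative simplification of the isFullCopyNeeded computation, not actual Solr code:

// Illustrative simplification: the slave is in sync when its commit timestamp equals the
// master's version; a full copy is needed when the slave is ahead on version or generation,
// or when replication is forced.
static boolean needsFullCopy(long slaveCommitTimestamp, long slaveGeneration,
                             long masterVersion, long masterGeneration,
                             boolean forceReplication) {
  return slaveCommitTimestamp >= masterVersion
      || slaveGeneration >= masterGeneration
      || forceReplication;
}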
use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
the class MoreLikeThisHandler, method handleRequestBody.
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
SolrParams params = req.getParams();
long timeAllowed = (long) params.getInt(CommonParams.TIME_ALLOWED, -1);
if (timeAllowed > 0) {
SolrQueryTimeoutImpl.set(timeAllowed);
}
try {
// Set field flags
ReturnFields returnFields = new SolrReturnFields(req);
rsp.setReturnFields(returnFields);
int flags = 0;
if (returnFields.wantsScore()) {
flags |= SolrIndexSearcher.GET_SCORES;
}
String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
String q = params.get(CommonParams.Q);
Query query = null;
SortSpec sortSpec = null;
List<Query> filters = null;
try {
if (q != null) {
QParser parser = QParser.getParser(q, defType, req);
query = parser.getQuery();
sortSpec = parser.getSortSpec(true);
}
String[] fqs = req.getParams().getParams(CommonParams.FQ);
if (fqs != null && fqs.length != 0) {
filters = new ArrayList<>();
for (String fq : fqs) {
if (fq != null && fq.trim().length() != 0) {
QParser fqp = QParser.getParser(fq, req);
filters.add(fqp.getQuery());
}
}
}
} catch (SyntaxError e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
SolrIndexSearcher searcher = req.getSearcher();
MoreLikeThisHelper mlt = new MoreLikeThisHelper(params, searcher);
// Hold on to the interesting terms if relevant
TermStyle termStyle = TermStyle.get(params.get(MoreLikeThisParams.INTERESTING_TERMS));
List<InterestingTerm> interesting = (termStyle == TermStyle.NONE) ? null : new ArrayList<>(mlt.mlt.getMaxQueryTerms());
DocListAndSet mltDocs = null;
// Parse Required Params
// This will have either a single Reader or a valid query
Reader reader = null;
try {
if (q == null || q.trim().length() < 1) {
Iterable<ContentStream> streams = req.getContentStreams();
if (streams != null) {
Iterator<ContentStream> iter = streams.iterator();
if (iter.hasNext()) {
reader = iter.next().getReader();
}
if (iter.hasNext()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "MoreLikeThis does not support multiple ContentStreams");
}
}
}
int start = params.getInt(CommonParams.START, CommonParams.START_DEFAULT);
int rows = params.getInt(CommonParams.ROWS, CommonParams.ROWS_DEFAULT);
// --------------------------------------------------------------------------------
if (reader != null) {
mltDocs = mlt.getMoreLikeThis(reader, start, rows, filters, interesting, flags);
} else if (q != null) {
// Matching options
boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE, true);
int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
// Find the base match
// only get the first one...
DocList match = searcher.getDocList(query, null, null, matchOffset, 1, flags);
if (includeMatch) {
rsp.add("match", match);
}
// This is an iterator, but we only handle the first match
DocIterator iterator = match.iterator();
if (iterator.hasNext()) {
// do a MoreLikeThis query for each document in results
int id = iterator.nextDoc();
mltDocs = mlt.getMoreLikeThis(id, start, rows, filters, interesting, flags);
}
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "MoreLikeThis requires either a query (?q=) or text to find similar documents.");
}
} finally {
if (reader != null) {
reader.close();
}
}
if (mltDocs == null) {
// avoid NPE
mltDocs = new DocListAndSet();
}
rsp.addResponse(mltDocs.docList);
if (interesting != null) {
if (termStyle == TermStyle.DETAILS) {
NamedList<Float> it = new NamedList<>();
for (InterestingTerm t : interesting) {
it.add(t.term.toString(), t.boost);
}
rsp.add("interestingTerms", it);
} else {
List<String> it = new ArrayList<>(interesting.size());
for (InterestingTerm t : interesting) {
it.add(t.term.text());
}
rsp.add("interestingTerms", it);
}
}
// maybe facet the results
if (params.getBool(FacetParams.FACET, false)) {
if (mltDocs.docSet == null) {
rsp.add("facet_counts", null);
} else {
SimpleFacets f = new SimpleFacets(req, mltDocs.docSet, params);
rsp.add("facet_counts", FacetComponent.getFacetCounts(f));
}
}
boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
boolean dbgQuery = false, dbgResults = false;
if (dbg == false) {
//if it's true, we are doing everything anyway.
String[] dbgParams = req.getParams().getParams(CommonParams.DEBUG);
if (dbgParams != null) {
for (String dbgParam : dbgParams) {
if (dbgParam.equals(CommonParams.QUERY)) {
dbgQuery = true;
} else if (dbgParam.equals(CommonParams.RESULTS)) {
dbgResults = true;
}
}
}
} else {
dbgQuery = true;
dbgResults = true;
}
// Copied from StandardRequestHandler... perhaps it should be added to doStandardDebug?
if (dbg == true) {
try {
NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.getRawMLTQuery(), mltDocs.docList, dbgQuery, dbgResults);
if (null != dbgInfo) {
if (null != filters) {
dbgInfo.add("filter_queries", req.getParams().getParams(CommonParams.FQ));
List<String> fqs = new ArrayList<>(filters.size());
for (Query fq : filters) {
fqs.add(QueryParsing.toString(fq, req.getSchema()));
}
dbgInfo.add("parsed_filter_queries", fqs);
}
rsp.add("debug", dbgInfo);
}
} catch (Exception e) {
SolrException.log(log, "Exception during debug", e);
rsp.add("exception_during_debug", SolrException.toStr(e));
}
}
} catch (ExitableDirectoryReader.ExitingReaderException ex) {
log.warn("Query: " + req.getParamString() + "; " + ex.getMessage());
} finally {
SolrQueryTimeoutImpl.reset();
}
}
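From the client side, this handler can be exercised with SolrJ. A hedged sketch: the /mlt handler path, the core name mycore, and the field list are all assumptions for illustration:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

// Illustrative client call against a MoreLikeThisHandler assumed to be mapped at /mlt.
static void queryMoreLikeThis() throws Exception {
  try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/mycore").build()) {
    SolrQuery q = new SolrQuery("id:doc1"); // base document to find similar documents for
    q.setRequestHandler("/mlt");
    q.set("mlt.fl", "title,body");            // fields to mine for interesting terms
    q.set("mlt.interestingTerms", "details"); // return the terms with their boosts
    q.setRows(10);
    QueryResponse rsp = client.query(q);
    System.out.println(rsp.getResults().getNumFound() + " similar documents");
  }
}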
use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
the class ReplicationHandler, method getEventListener.
/**
* Register a listener for postcommit/optimize events
*
* @param snapshoot whether to take a snapshot
* @param getCommit whether to also capture the latest commit point
*
* @return an instance of the event listener
*/
private SolrEventListener getEventListener(final boolean snapshoot, final boolean getCommit) {
return new SolrEventListener() {
@Override
public void init(NamedList args) {
/*no op*/
}
/**
* This refreshes the latest replicatable index commit and can optionally create snapshots as well
*/
@Override
public void postCommit() {
IndexCommit currentCommitPoint = core.getDeletionPolicy().getLatestCommit();
if (getCommit) {
// IndexCommit oldCommitPoint = indexCommitPoint;
indexCommitPoint = currentCommitPoint;
// We don't need to save commit points for replication, the SolrDeletionPolicy
// always saves the last commit point (and the last optimized commit point, if needed)
/***
if (indexCommitPoint != null) {
core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
}
if(oldCommitPoint != null){
core.getDeletionPolicy().releaseCommitPointAndExtendReserve(oldCommitPoint.getGeneration());
}
***/
}
if (snapshoot) {
try {
int numberToKeep = numberBackupsToKeep;
if (numberToKeep < 1) {
numberToKeep = Integer.MAX_VALUE;
}
SnapShooter snapShooter = new SnapShooter(core, null, null);
snapShooter.validateCreateSnapshot();
snapShooter.createSnapAsync(currentCommitPoint, numberToKeep, (nl) -> snapShootDetails = nl);
} catch (Exception e) {
LOG.error("Exception while snapshooting", e);
}
}
}
@Override
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
/*no op*/
}
@Override
public void postSoftCommit() {
}
};
}
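For completeness, a sketch of how such a listener is typically wired up; the real ReplicationHandler registers its listeners from its inform(SolrCore) method, and the flag values here are illustrative:

// Sketch: register the listener for hard-commit and optimize events on the core's update handler.
SolrEventListener listener = getEventListener(true /* snapshoot */, true /* getCommit */);
core.getUpdateHandler().registerCommitCallback(listener);   // invoked after each hard commit
core.getUpdateHandler().registerOptimizeCallback(listener); // invoked after each optimize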