Use of org.apache.solr.core.IndexDeletionPolicyWrapper in project lucene-solr by apache.
The class ReplicationHandler, method inform().
@Override
@SuppressWarnings("unchecked")
public void inform(SolrCore core) {
  this.core = core;
  registerCloseHook();
  Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
  if (nbtk != null) {
    numberBackupsToKeep = Integer.parseInt(nbtk.toString());
  } else {
    numberBackupsToKeep = 0;
  }
  NamedList slave = (NamedList) initArgs.get("slave");
  boolean enableSlave = isEnabled(slave);
  if (enableSlave) {
    currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(slave, this, core);
    setupPolling((String) slave.get(POLL_INTERVAL));
    isSlave = true;
  }
  NamedList master = (NamedList) initArgs.get("master");
  boolean enableMaster = isEnabled(master);
  if (enableMaster || enableSlave) {
    if (core.getCoreContainer().getZkController() != null) {
      LOG.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you"
          + " intend this behavior, it usually indicates a mis-configuration. Master setting is "
          + Boolean.toString(enableMaster) + " and slave setting is " + Boolean.toString(enableSlave));
    }
  }
  if (!enableSlave && !enableMaster) {
    enableMaster = true;
    master = new NamedList<>();
  }
  if (enableMaster) {
    includeConfFiles = (String) master.get(CONF_FILES);
    if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
      List<String> files = Arrays.asList(includeConfFiles.split(","));
      for (String file : files) {
        if (file.trim().length() == 0)
          continue;
        String[] strs = file.trim().split(":");
        // if an alias is given for this file, record it; otherwise record null
        confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
      }
      LOG.info("Replication enabled for following config files: " + includeConfFiles);
    }
    List backup = master.getAll("backupAfter");
    boolean backupOnCommit = backup.contains("commit");
    boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
    List replicateAfter = master.getAll(REPLICATE_AFTER);
    replicateOnCommit = replicateAfter.contains("commit");
    replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");
    if (!replicateOnCommit && !replicateOnOptimize) {
      replicateOnCommit = true;
    }
    // save the last optimized commit point.
    if (replicateOnOptimize) {
      IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
      IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
      if (policy instanceof SolrDeletionPolicy) {
        SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy) policy;
        if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
          solrPolicy.setMaxOptimizedCommitsToKeep(1);
        }
      } else {
        LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
      }
    }
    if (replicateOnOptimize || backupOnOptimize) {
      core.getUpdateHandler().registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
    }
    if (replicateOnCommit || backupOnCommit) {
      replicateOnCommit = true;
      core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
    }
    if (replicateAfter.contains("startup")) {
      replicateOnStart = true;
      RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
      try {
        DirectoryReader reader = s == null ? null : s.get().getIndexReader();
        if (reader != null && reader.getIndexCommit() != null && reader.getIndexCommit().getGeneration() != 1L) {
          try {
            if (replicateOnOptimize) {
              Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
              for (IndexCommit ic : commits) {
                if (ic.getSegmentCount() == 1) {
                  if (indexCommitPoint == null || indexCommitPoint.getGeneration() < ic.getGeneration())
                    indexCommitPoint = ic;
                }
              }
            } else {
              indexCommitPoint = reader.getIndexCommit();
            }
          } finally {
            // We don't need to save commit points for replication, the SolrDeletionPolicy
            // always saves the last commit point (and the last optimized commit point, if needed)
            /***
            if (indexCommitPoint != null) {
              core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
            }
            ***/
          }
        }
        // ensure the writer is init'd so that we have a list of commit points
        RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
        iw.decref();
      } catch (IOException e) {
        LOG.warn("Unable to get IndexCommit on startup", e);
      } finally {
        if (s != null)
          s.decref();
      }
    }
    String reserve = (String) master.get(RESERVE);
    if (reserve != null && !reserve.trim().equals("")) {
      reserveCommitDuration = readIntervalMs(reserve);
    }
    LOG.info("Commits will be reserved for " + reserveCommitDuration);
    isMaster = true;
  }
}
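The reserveCommitDuration read above controls how long a commit point is protected once a replication request touches it. A minimal sketch of that reservation pattern through IndexDeletionPolicyWrapper follows; it is illustrative, not the handler's actual fetch code, and assumes the core and reserveCommitDuration fields shown above are in scope, plus that setReserveDuration(Long, long) is the wrapper's reservation method.

// Hedged sketch: reserve the latest commit so its files are not deleted
// while a slave is still pulling them.
IndexDeletionPolicyWrapper delPolicy = core.getDeletionPolicy();
IndexCommit latest = delPolicy.getLatestCommit();
if (latest != null) {
  // keep this generation alive for the configured reserve window (milliseconds)
  delPolicy.setReserveDuration(latest.getGeneration(), reserveCommitDuration);
}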
Use of org.apache.solr.core.IndexDeletionPolicyWrapper in project lucene-solr by apache.
The class ReplicationHandler, method doSnapShoot().
private void doSnapShoot(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest req) {
  try {
    int numberToKeep = params.getInt(NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM, 0);
    if (numberToKeep > 0 && numberBackupsToKeep > 0) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot use " + NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM
          + " if " + NUMBER_BACKUPS_TO_KEEP_INIT_PARAM + " was specified in the configuration.");
    }
    numberToKeep = Math.max(numberToKeep, numberBackupsToKeep);
    if (numberToKeep < 1) {
      numberToKeep = Integer.MAX_VALUE;
    }
    IndexCommit indexCommit = null;
    String commitName = params.get(CoreAdminParams.COMMIT_NAME);
    if (commitName != null) {
      SolrSnapshotMetaDataManager snapshotMgr = core.getSnapshotMetaDataManager();
      Optional<IndexCommit> commit = snapshotMgr.getIndexCommitByName(commitName);
      if (commit.isPresent()) {
        indexCommit = commit.get();
      } else {
        throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to find an index commit with name " + commitName + " for core " + core.getName());
      }
    } else {
      IndexDeletionPolicyWrapper delPolicy = core.getDeletionPolicy();
      indexCommit = delPolicy.getLatestCommit();
      if (indexCommit == null) {
        indexCommit = req.getSearcher().getIndexReader().getIndexCommit();
      }
    }
    String location = params.get(CoreAdminParams.BACKUP_LOCATION);
    String repoName = params.get(CoreAdminParams.BACKUP_REPOSITORY);
    CoreContainer cc = core.getCoreContainer();
    BackupRepository repo = null;
    if (repoName != null) {
      repo = cc.newBackupRepository(Optional.of(repoName));
      location = repo.getBackupLocation(location);
      if (location == null) {
        throw new IllegalArgumentException("location is required");
      }
    } else {
      repo = new LocalFileSystemRepository();
      if (location == null) {
        location = core.getDataDir();
      } else {
        location = core.getCoreDescriptor().getInstanceDir().resolve(location).normalize().toString();
      }
    }
    // small race here before the commit point is saved
    URI locationUri = repo.createURI(location);
    SnapShooter snapShooter = new SnapShooter(repo, core, locationUri, params.get(NAME), commitName);
    snapShooter.validateCreateSnapshot();
    snapShooter.createSnapAsync(indexCommit, numberToKeep, (nl) -> snapShootDetails = nl);
  } catch (Exception e) {
    LOG.warn("Exception during creating a snapshot", e);
    rsp.add("exception", e);
  }
}
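For context, doSnapShoot is reached through the handler's backup command. Below is a hedged SolrJ sketch of such a request, using HttpSolrClient, ModifiableSolrParams and GenericSolrRequest; the base URL, core name, and backup name are illustrative placeholders, not taken from the code above.

// Hedged sketch: ask the /replication handler for a backup, which routes to doSnapShoot.
void triggerBackup() throws Exception {
  try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/core1").build()) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("command", "backup");
    params.set("name", "nightly");    // surfaces as params.get(NAME) in doSnapShoot
    params.set("numberToKeep", 3);    // the NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM value
    client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/replication", params));
  }
}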
Use of org.apache.solr.core.IndexDeletionPolicyWrapper in project lucene-solr by apache.
The class SnapShooter, method createSnapshot().
public NamedList createSnapshot() throws Exception {
  RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
  try {
    if (commitName != null) {
      SolrSnapshotMetaDataManager snapshotMgr = solrCore.getSnapshotMetaDataManager();
      Optional<IndexCommit> commit = snapshotMgr.getIndexCommitByName(commitName);
      if (commit.isPresent()) {
        return createSnapshot(commit.get());
      }
      throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to find an index commit with name " + commitName + " for core " + solrCore.getName());
    } else {
      //TODO should we try solrCore.getDeletionPolicy().getLatestCommit() first?
      IndexDeletionPolicyWrapper deletionPolicy = solrCore.getDeletionPolicy();
      IndexCommit indexCommit = searcher.get().getIndexReader().getIndexCommit();
      deletionPolicy.saveCommitPoint(indexCommit.getGeneration());
      try {
        return createSnapshot(indexCommit);
      } finally {
        deletionPolicy.releaseCommitPoint(indexCommit.getGeneration());
      }
    }
  } finally {
    searcher.decref();
  }
}
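A SnapShooter can also be driven synchronously. The sketch below mirrors the setup doSnapShoot performs before its async call and uses only calls shown above; the snapshot name is an illustrative placeholder, and a SolrCore named core is assumed to be in scope.

// Hedged sketch: snapshot the current commit into the core's data directory using the
// local file system repository; createSnapshot() reserves and releases the commit
// point internally, as shown in the method above.
BackupRepository repo = new LocalFileSystemRepository();
URI location = repo.createURI(core.getDataDir());
SnapShooter shooter = new SnapShooter(repo, core, location, "manual-backup", null);
shooter.validateCreateSnapshot();
NamedList details = shooter.createSnapshot();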