use of org.apache.lucene.index.IndexCommit in project neo4j by neo4j.
In the class LuceneDataSource, method listStoreFiles:
public ResourceIterator<File> listStoreFiles(boolean includeLogicalLogs) throws IOException {
    // Never include logical logs since they are of little importance
    final Collection<File> files = new ArrayList<>();
    final Collection<Pair<SnapshotDeletionPolicy, IndexCommit>> snapshots = new ArrayList<>();
    makeSureAllIndexesAreInstantiated();
    for (IndexReference writer : getAllIndexes()) {
        SnapshotDeletionPolicy deletionPolicy = (SnapshotDeletionPolicy) writer.getWriter().getConfig().getIndexDeletionPolicy();
        File indexDirectory = getFileDirectory(baseStorePath, writer.getIdentifier());
        IndexCommit commit;
        try {
            // Throws IllegalStateException if no commits yet
            commit = deletionPolicy.snapshot();
        } catch (IllegalStateException e) {
            /*
             * This is insane but happens if we try to snapshot an existing index
             * that has no commits. This is a bad API design - it should return null
             * or something. This is not exceptional.
             *
             * For the time being we just do a commit and try again.
             */
            writer.getWriter().commit();
            commit = deletionPolicy.snapshot();
        }
        for (String fileName : commit.getFileNames()) {
            files.add(new File(indexDirectory, fileName));
        }
        snapshots.add(Pair.of(deletionPolicy, commit));
    }
    return new PrefetchingResourceIterator<File>() {
        private final Iterator<File> filesIterator = files.iterator();

        @Override
        protected File fetchNextOrNull() {
            return filesIterator.hasNext() ? filesIterator.next() : null;
        }

        @Override
        public void close() {
            for (Pair<SnapshotDeletionPolicy, IndexCommit> policyAndCommit : snapshots) {
                try {
                    policyAndCommit.first().release(policyAndCommit.other());
                } catch (IOException e) {
                    // TODO What to do?
                    e.printStackTrace();
                }
            }
        }
    };
}
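The snapshot()/release() pair used above is the standard Lucene idiom for obtaining a stable, copyable set of index files. Below is a minimal standalone sketch of the same pattern, assuming Lucene 5/6-era APIs (lucene-core plus lucene-analyzers-common) and an illustrative index path; it is a sketch, not neo4j code.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SnapshotCopyExample {
    public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) { // illustrative path
            SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
            IndexWriterConfig cfg = new IndexWriterConfig(new StandardAnalyzer()).setIndexDeletionPolicy(sdp);
            try (IndexWriter writer = new IndexWriter(dir, cfg)) {
                writer.commit(); // guarantee a commit exists; snapshot() throws IllegalStateException otherwise
                IndexCommit commit = sdp.snapshot(); // pin this commit's files against deletion
                try {
                    for (String fileName : commit.getFileNames()) {
                        System.out.println(fileName); // safe to copy these files while the snapshot is held
                    }
                } finally {
                    sdp.release(commit); // un-pin, as the iterator's close() does above
                    writer.deleteUnusedFiles(); // let Lucene reclaim the files right away
                }
            }
        }
    }
}

Releasing in a finally block mirrors what close() does above: until release() is called, Lucene keeps every file belonging to the pinned commit on disk, even across later commits and merges.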
use of org.apache.lucene.index.IndexCommit in project neo4j by neo4j.
In the class LuceneIndexSnapshots, method forIndex:
/**
 * Create an index snapshot iterator for a read-only index.
 *
 * @param indexFolder index location folder
 * @param directory index directory
 * @return index file name resource iterator
 * @throws IOException if the index commits cannot be read
 */
public static ResourceIterator<File> forIndex(File indexFolder, Directory directory) throws IOException {
    if (!hasCommits(directory)) {
        return emptyIterator();
    }
    Collection<IndexCommit> indexCommits = DirectoryReader.listCommits(directory);
    IndexCommit indexCommit = Iterables.last(indexCommits);
    return new ReadOnlyIndexSnapshotFileIterator(indexFolder, indexCommit);
}
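hasCommits and Iterables.last are neo4j helpers; the same guard and "newest commit" selection can be expressed with nothing but the Lucene API. A minimal sketch, assuming only lucene-core:

import java.io.IOException;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;

final class LastCommitUtil {
    // Plain-Lucene equivalent of the hasCommits guard plus Iterables.last above.
    static IndexCommit lastCommitOrNull(Directory directory) throws IOException {
        if (!DirectoryReader.indexExists(directory)) {
            return null; // DirectoryReader.listCommits would throw IndexNotFoundException here
        }
        List<IndexCommit> commits = DirectoryReader.listCommits(directory);
        return commits.get(commits.size() - 1); // listCommits sorts oldest first, so last == newest
    }
}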
use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
In the class ReplicationHandler, method handleRequestBody:
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    rsp.setHttpCaching(false);
    final SolrParams solrParams = req.getParams();
    String command = solrParams.get(COMMAND);
    if (command == null) {
        rsp.add(STATUS, OK_STATUS);
        rsp.add("message", "No command");
        return;
    }
    // It gives the current 'replicateable' index version
    if (command.equals(CMD_INDEX_VERSION)) {
        // make a copy so it won't change
        IndexCommit commitPoint = indexCommitPoint;
        if (commitPoint == null) {
            // if this handler is 'lazy', we may not have tracked the last commit
            // because our commit listener is registered on inform
            commitPoint = core.getDeletionPolicy().getLatestCommit();
        }
        if (commitPoint != null && replicationEnabled.get()) {
            //
            // There is a race condition here. The commit point may be changed / deleted by the time
            // we get around to reserving it. This is a very small window though, and should not result
            // in a catastrophic failure, but will result in the client getting an empty file list for
            // the CMD_GET_FILE_LIST command.
            //
            core.getDeletionPolicy().setReserveDuration(commitPoint.getGeneration(), reserveCommitDuration);
            rsp.add(CMD_INDEX_VERSION, IndexDeletionPolicyWrapper.getCommitTimestamp(commitPoint));
            rsp.add(GENERATION, commitPoint.getGeneration());
        } else {
            // This happens when replication is not configured to happen after startup and no commit/optimize
            // has happened yet.
            rsp.add(CMD_INDEX_VERSION, 0L);
            rsp.add(GENERATION, 0L);
        }
    } else if (command.equals(CMD_GET_FILE)) {
        getFileStream(solrParams, rsp);
    } else if (command.equals(CMD_GET_FILE_LIST)) {
        getFileList(solrParams, rsp);
    } else if (command.equalsIgnoreCase(CMD_BACKUP)) {
        doSnapShoot(new ModifiableSolrParams(solrParams), rsp, req);
        rsp.add(STATUS, OK_STATUS);
    } else if (command.equalsIgnoreCase(CMD_RESTORE)) {
        restore(new ModifiableSolrParams(solrParams), rsp, req);
        rsp.add(STATUS, OK_STATUS);
    } else if (command.equalsIgnoreCase(CMD_RESTORE_STATUS)) {
        rsp.add(CMD_RESTORE_STATUS, getRestoreStatus());
    } else if (command.equalsIgnoreCase(CMD_DELETE_BACKUP)) {
        deleteSnapshot(new ModifiableSolrParams(solrParams));
        rsp.add(STATUS, OK_STATUS);
    } else if (command.equalsIgnoreCase(CMD_FETCH_INDEX)) {
        String masterUrl = solrParams.get(MASTER_URL);
        if (!isSlave && masterUrl == null) {
            rsp.add(STATUS, ERR_STATUS);
            rsp.add("message", "No slave configured or no 'masterUrl' Specified");
            return;
        }
        final SolrParams paramsCopy = new ModifiableSolrParams(solrParams);
        Thread fetchThread = new Thread(() -> doFetch(paramsCopy, false), "explicit-fetchindex-cmd");
        fetchThread.setDaemon(false);
        fetchThread.start();
        if (solrParams.getBool(WAIT, false)) {
            fetchThread.join();
        }
        rsp.add(STATUS, OK_STATUS);
    } else if (command.equalsIgnoreCase(CMD_DISABLE_POLL)) {
        if (pollingIndexFetcher != null) {
            disablePoll();
            rsp.add(STATUS, OK_STATUS);
        } else {
            rsp.add(STATUS, ERR_STATUS);
            rsp.add("message", "No slave configured");
        }
    } else if (command.equalsIgnoreCase(CMD_ENABLE_POLL)) {
        if (pollingIndexFetcher != null) {
            enablePoll();
            rsp.add(STATUS, OK_STATUS);
        } else {
            rsp.add(STATUS, ERR_STATUS);
            rsp.add("message", "No slave configured");
        }
    } else if (command.equalsIgnoreCase(CMD_ABORT_FETCH)) {
        if (abortFetch()) {
            rsp.add(STATUS, OK_STATUS);
        } else {
            rsp.add(STATUS, ERR_STATUS);
            rsp.add("message", "No slave configured");
        }
    } else if (command.equals(CMD_SHOW_COMMITS)) {
        rsp.add(CMD_SHOW_COMMITS, getCommits());
    } else if (command.equals(CMD_DETAILS)) {
        rsp.add(CMD_DETAILS, getReplicationDetails(solrParams.getBool("slave", true)));
    } else if (CMD_ENABLE_REPL.equalsIgnoreCase(command)) {
        replicationEnabled.set(true);
        rsp.add(STATUS, OK_STATUS);
    } else if (CMD_DISABLE_REPL.equalsIgnoreCase(command)) {
        replicationEnabled.set(false);
        rsp.add(STATUS, OK_STATUS);
    }
}
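From a client's point of view, the commands dispatched above are plain HTTP parameters on the handler's endpoint. A minimal SolrJ sketch, assuming a SolrJ 6+ style HttpSolrClient, a placeholder core URL, and the handler registered at /replication (the conventional path); the response keys match the CMD_INDEX_VERSION and GENERATION constants used above:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class IndexVersionProbe {
    public static void main(String[] args) throws Exception {
        // Base URL and core name are placeholders.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/core1").build()) {
            ModifiableSolrParams params = new ModifiableSolrParams();
            params.set("qt", "/replication");      // a leading '/' routes the request to that handler path
            params.set("command", "indexversion"); // CMD_INDEX_VERSION above
            NamedList<Object> rsp = client.request(new QueryRequest(params));
            System.out.println("indexversion = " + rsp.get("indexversion"));
            System.out.println("generation   = " + rsp.get("generation"));
        }
    }
}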
use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
In the class ReplicationHandler, method getReplicationDetails:
/**
* Used for showing statistics and progress information.
*/
private NamedList<Object> getReplicationDetails(boolean showSlaveDetails) {
    NamedList<Object> details = new SimpleOrderedMap<>();
    NamedList<Object> master = new SimpleOrderedMap<>();
    NamedList<Object> slave = new SimpleOrderedMap<>();
    details.add("indexSize", NumberUtils.readableSize(core.getIndexSize()));
    details.add("indexPath", core.getIndexDir());
    details.add(CMD_SHOW_COMMITS, getCommits());
    details.add("isMaster", String.valueOf(isMaster));
    details.add("isSlave", String.valueOf(isSlave));
    CommitVersionInfo vInfo = getIndexVersion();
    details.add("indexVersion", null == vInfo ? 0 : vInfo.version);
    details.add(GENERATION, null == vInfo ? 0 : vInfo.generation);
    // make a copy so it won't change
    IndexCommit commit = indexCommitPoint;
    if (isMaster) {
        if (includeConfFiles != null)
            master.add(CONF_FILES, includeConfFiles);
        master.add(REPLICATE_AFTER, getReplicateAfterStrings());
        master.add("replicationEnabled", String.valueOf(replicationEnabled.get()));
    }
    if (isMaster && commit != null) {
        CommitVersionInfo repCommitInfo = CommitVersionInfo.build(commit);
        master.add("replicableVersion", repCommitInfo.version);
        master.add("replicableGeneration", repCommitInfo.generation);
    }
    IndexFetcher fetcher = currentIndexFetcher;
    if (fetcher != null) {
        Properties props = loadReplicationProperties();
        if (showSlaveDetails) {
            try {
                NamedList nl = fetcher.getDetails();
                slave.add("masterDetails", nl.get(CMD_DETAILS));
            } catch (Exception e) {
                LOG.warn("Exception while invoking 'details' method for replication on master ", e);
                slave.add(ERR_STATUS, "invalid_master");
            }
        }
        slave.add(MASTER_URL, fetcher.getMasterUrl());
        if (getPollInterval() != null) {
            slave.add(POLL_INTERVAL, getPollInterval());
        }
        Date nextScheduled = getNextScheduledExecTime();
        if (nextScheduled != null && !isPollingDisabled()) {
            slave.add(NEXT_EXECUTION_AT, nextScheduled.toString());
        } else if (isPollingDisabled()) {
            slave.add(NEXT_EXECUTION_AT, "Polling disabled");
        }
        addVal(slave, IndexFetcher.INDEX_REPLICATED_AT, props, Date.class);
        addVal(slave, IndexFetcher.INDEX_REPLICATED_AT_LIST, props, List.class);
        addVal(slave, IndexFetcher.REPLICATION_FAILED_AT_LIST, props, List.class);
        addVal(slave, IndexFetcher.TIMES_INDEX_REPLICATED, props, Integer.class);
        addVal(slave, IndexFetcher.CONF_FILES_REPLICATED, props, Integer.class);
        addVal(slave, IndexFetcher.TIMES_CONFIG_REPLICATED, props, Integer.class);
        addVal(slave, IndexFetcher.CONF_FILES_REPLICATED_AT, props, Integer.class);
        addVal(slave, IndexFetcher.LAST_CYCLE_BYTES_DOWNLOADED, props, Long.class);
        addVal(slave, IndexFetcher.TIMES_FAILED, props, Integer.class);
        addVal(slave, IndexFetcher.REPLICATION_FAILED_AT, props, Date.class);
        addVal(slave, IndexFetcher.PREVIOUS_CYCLE_TIME_TAKEN, props, Long.class);
        slave.add("currentDate", new Date().toString());
        slave.add("isPollingDisabled", String.valueOf(isPollingDisabled()));
        boolean isReplicating = isReplicating();
        slave.add("isReplicating", String.valueOf(isReplicating));
        if (isReplicating) {
            try {
                long bytesToDownload = 0;
                List<String> filesToDownload = new ArrayList<>();
                for (Map<String, Object> file : fetcher.getFilesToDownload()) {
                    filesToDownload.add((String) file.get(NAME));
                    bytesToDownload += (Long) file.get(SIZE);
                }
                // get list of conf files to download
                for (Map<String, Object> file : fetcher.getConfFilesToDownload()) {
                    filesToDownload.add((String) file.get(NAME));
                    bytesToDownload += (Long) file.get(SIZE);
                }
                slave.add("filesToDownload", filesToDownload);
                slave.add("numFilesToDownload", String.valueOf(filesToDownload.size()));
                slave.add("bytesToDownload", NumberUtils.readableSize(bytesToDownload));
                long bytesDownloaded = 0;
                List<String> filesDownloaded = new ArrayList<>();
                for (Map<String, Object> file : fetcher.getFilesDownloaded()) {
                    filesDownloaded.add((String) file.get(NAME));
                    bytesDownloaded += (Long) file.get(SIZE);
                }
                // get list of conf files downloaded
                for (Map<String, Object> file : fetcher.getConfFilesDownloaded()) {
                    filesDownloaded.add((String) file.get(NAME));
                    bytesDownloaded += (Long) file.get(SIZE);
                }
                Map<String, Object> currentFile = fetcher.getCurrentFile();
                String currFile = null;
                long currFileSize = 0, currFileSizeDownloaded = 0;
                float percentDownloaded = 0;
                if (currentFile != null) {
                    currFile = (String) currentFile.get(NAME);
                    currFileSize = (Long) currentFile.get(SIZE);
                    if (currentFile.containsKey("bytesDownloaded")) {
                        currFileSizeDownloaded = (Long) currentFile.get("bytesDownloaded");
                        bytesDownloaded += currFileSizeDownloaded;
                        if (currFileSize > 0)
                            // long division: any fractional percentage is truncated before the float assignment
                            percentDownloaded = (currFileSizeDownloaded * 100) / currFileSize;
                    }
                }
                slave.add("filesDownloaded", filesDownloaded);
                slave.add("numFilesDownloaded", String.valueOf(filesDownloaded.size()));
                long estimatedTimeRemaining = 0;
                Date replicationStartTimeStamp = fetcher.getReplicationStartTimeStamp();
                if (replicationStartTimeStamp != null) {
                    slave.add("replicationStartTime", replicationStartTimeStamp.toString());
                }
                long elapsed = fetcher.getReplicationTimeElapsed();
                slave.add("timeElapsed", String.valueOf(elapsed) + "s");
                if (bytesDownloaded > 0)
                    estimatedTimeRemaining = ((bytesToDownload - bytesDownloaded) * elapsed) / bytesDownloaded;
                float totalPercent = 0;
                long downloadSpeed = 0;
                if (bytesToDownload > 0)
                    // long division again, so totalPercent only takes whole-number values
                    totalPercent = (bytesDownloaded * 100) / bytesToDownload;
                if (elapsed > 0)
                    downloadSpeed = (bytesDownloaded / elapsed);
                if (currFile != null)
                    slave.add("currentFile", currFile);
                slave.add("currentFileSize", NumberUtils.readableSize(currFileSize));
                slave.add("currentFileSizeDownloaded", NumberUtils.readableSize(currFileSizeDownloaded));
                slave.add("currentFileSizePercent", String.valueOf(percentDownloaded));
                slave.add("bytesDownloaded", NumberUtils.readableSize(bytesDownloaded));
                slave.add("totalPercent", String.valueOf(totalPercent));
                slave.add("timeRemaining", String.valueOf(estimatedTimeRemaining) + "s");
                slave.add("downloadSpeed", NumberUtils.readableSize(downloadSpeed));
            } catch (Exception e) {
                LOG.error("Exception while writing replication details: ", e);
            }
        }
    }
    if (isMaster)
        details.add("master", master);
    if (slave.size() > 0)
        details.add("slave", slave);
    NamedList snapshotStats = snapShootDetails;
    if (snapshotStats != null)
        details.add(CMD_BACKUP, snapshotStats);
    return details;
}
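The progress figures in the isReplicating branch are computed entirely in long arithmetic. A small worked example with made-up numbers shows what the reported values come out to, including the whole-number truncation flagged in the comments above:

public class ProgressMathExample {
    public static void main(String[] args) {
        // Made-up figures: 10 MB to fetch, 2.5 MB done after 50 seconds.
        long bytesToDownload = 10_000_000L;
        long bytesDownloaded = 2_500_000L;
        long elapsed = 50L;

        // Same expressions as getReplicationDetails above.
        float totalPercent = (bytesDownloaded * 100) / bytesToDownload;
        long estimatedTimeRemaining = ((bytesToDownload - bytesDownloaded) * elapsed) / bytesDownloaded;
        long downloadSpeed = bytesDownloaded / elapsed;

        System.out.println(totalPercent);           // 25.0 (whole percent only, long division)
        System.out.println(estimatedTimeRemaining); // 150 (seconds, assuming a constant download rate)
        System.out.println(downloadSpeed);          // 50000 (bytes per second)
    }
}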
use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
In the class ReplicationHandler, method inform:
@Override
@SuppressWarnings("unchecked")
public void inform(SolrCore core) {
    this.core = core;
    registerCloseHook();
    Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
    if (nbtk != null) {
        numberBackupsToKeep = Integer.parseInt(nbtk.toString());
    } else {
        numberBackupsToKeep = 0;
    }
    NamedList slave = (NamedList) initArgs.get("slave");
    boolean enableSlave = isEnabled(slave);
    if (enableSlave) {
        currentIndexFetcher = pollingIndexFetcher = new IndexFetcher(slave, this, core);
        setupPolling((String) slave.get(POLL_INTERVAL));
        isSlave = true;
    }
    NamedList master = (NamedList) initArgs.get("master");
    boolean enableMaster = isEnabled(master);
    if (enableMaster || enableSlave) {
        if (core.getCoreContainer().getZkController() != null) {
            LOG.warn("SolrCloud is enabled for core " + core.getName()
                    + " but so is old-style replication. Make sure you intend this behavior, it usually indicates a mis-configuration."
                    + " Master setting is " + Boolean.toString(enableMaster)
                    + " and slave setting is " + Boolean.toString(enableSlave));
        }
    }
    if (!enableSlave && !enableMaster) {
        enableMaster = true;
        master = new NamedList<>();
    }
    if (enableMaster) {
        includeConfFiles = (String) master.get(CONF_FILES);
        if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
            List<String> files = Arrays.asList(includeConfFiles.split(","));
            for (String file : files) {
                if (file.trim().length() == 0)
                    continue;
                String[] strs = file.trim().split(":");
                // add the alias for this conf file if one was given, otherwise null
                confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
            }
            LOG.info("Replication enabled for following config files: " + includeConfFiles);
        }
        List backup = master.getAll("backupAfter");
        boolean backupOnCommit = backup.contains("commit");
        boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
        List replicateAfter = master.getAll(REPLICATE_AFTER);
        replicateOnCommit = replicateAfter.contains("commit");
        replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");
        if (!replicateOnCommit && !replicateOnOptimize) {
            replicateOnCommit = true;
        }
        // save the last optimized commit point.
        if (replicateOnOptimize) {
            IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
            IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
            if (policy instanceof SolrDeletionPolicy) {
                SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy) policy;
                if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
                    solrPolicy.setMaxOptimizedCommitsToKeep(1);
                }
            } else {
                LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
            }
        }
        if (replicateOnOptimize || backupOnOptimize) {
            core.getUpdateHandler().registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
        }
        if (replicateOnCommit || backupOnCommit) {
            replicateOnCommit = true;
            core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
        }
        if (replicateAfter.contains("startup")) {
            replicateOnStart = true;
            RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
            try {
                DirectoryReader reader = s == null ? null : s.get().getIndexReader();
                if (reader != null && reader.getIndexCommit() != null && reader.getIndexCommit().getGeneration() != 1L) {
                    try {
                        if (replicateOnOptimize) {
                            Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
                            for (IndexCommit ic : commits) {
                                if (ic.getSegmentCount() == 1) {
                                    if (indexCommitPoint == null || indexCommitPoint.getGeneration() < ic.getGeneration())
                                        indexCommitPoint = ic;
                                }
                            }
                        } else {
                            indexCommitPoint = reader.getIndexCommit();
                        }
                    } finally {
                        // We don't need to save commit points for replication, the SolrDeletionPolicy
                        // always saves the last commit point (and the last optimized commit point, if needed)
                        /***
                        if (indexCommitPoint != null) {
                            core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
                        }
                        ***/
                    }
                }
                // ensure the writer is init'd so that we have a list of commit points
                RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                iw.decref();
            } catch (IOException e) {
                LOG.warn("Unable to get IndexCommit on startup", e);
            } finally {
                if (s != null)
                    s.decref();
            }
        }
        String reserve = (String) master.get(RESERVE);
        if (reserve != null && !reserve.trim().equals("")) {
            reserveCommitDuration = readIntervalMs(reserve);
        }
        LOG.info("Commits will be reserved for " + reserveCommitDuration);
        isMaster = true;
    }
}
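Everything inform(...) consumes comes from the handler's init args, i.e. the master and slave sections of the /replication handler in solrconfig.xml. The sketch below builds an equivalent NamedList by hand to make those expectations concrete; the values and the master URL are illustrative, not taken from any real configuration:

import org.apache.solr.common.util.NamedList;

public class ReplicationInitArgsExample {
    public static void main(String[] args) {
        NamedList<Object> master = new NamedList<>();
        master.add("replicateAfter", "commit");   // sets replicateOnCommit in inform(...)
        master.add("replicateAfter", "startup");  // sets replicateOnStart
        master.add("backupAfter", "optimize");    // sets backupOnOptimize
        master.add("confFiles", "schema.xml,stopwords.txt:slave-stopwords.txt"); // "name:alias" form

        NamedList<Object> slave = new NamedList<>();
        slave.add("masterUrl", "http://master-host:8983/solr/core1/replication"); // placeholder URL
        slave.add("pollInterval", "00:00:60"); // hh:mm:ss, consumed by setupPolling

        NamedList<Object> initArgs = new NamedList<>();
        initArgs.add("master", master);
        initArgs.add("slave", slave);
        // Configuring both sections on one node makes it a repeater:
        // it polls its own master while also serving the index to its slaves.
        System.out.println(initArgs);
    }
}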