Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
The class IndexFetcher, method openNewSearcherAndUpdateCommitPoint.
private void openNewSearcherAndUpdateCommitPoint() throws IOException {
  RefCounted<SolrIndexSearcher> searcher = null;
  IndexCommit commitPoint;
  // must get the latest solrCore object because the one we have might be closed because of a reload
  // todo stop keeping solrCore around
  SolrCore core = solrCore.getCoreContainer().getCore(solrCore.getName());
  try {
    Future[] waitSearcher = new Future[1];
    searcher = core.getSearcher(true, true, waitSearcher, true);
    if (waitSearcher[0] != null) {
      try {
        waitSearcher[0].get();
      } catch (InterruptedException | ExecutionException e) {
        SolrException.log(LOG, e);
      }
    }
    commitPoint = searcher.get().getIndexReader().getIndexCommit();
  } finally {
    if (searcher != null) {
      searcher.decref();
    }
    core.close();
  }
  // update the commit point in replication handler
  replicationHandler.indexCommitPoint = commitPoint;
}
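The central pattern above — borrow a searcher via RefCounted, read the IndexCommit from its reader, and always release the reference in a finally block — can be isolated into a small helper. The sketch below is hypothetical (it is not part of IndexFetcher) and assumes you already hold an open SolrCore:

import java.io.IOException;

import org.apache.lucene.index.IndexCommit;
import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;

// Hypothetical helper: fetch the commit point backing the core's current searcher.
static IndexCommit currentCommit(SolrCore core) throws IOException {
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  try {
    // the reader was opened on a specific commit point; hand it to the caller
    return searcher.get().getIndexReader().getIndexCommit();
  } finally {
    searcher.decref(); // always release the reference, or the searcher (and its files) leak
  }
}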
Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
The class CreateSnapshotOp, method execute.
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
  CoreContainer cc = it.handler.getCoreContainer();
  final SolrParams params = it.req.getParams();
  String commitName = params.required().get(CoreAdminParams.COMMIT_NAME);
  String cname = params.required().get(CoreAdminParams.CORE);
  try (SolrCore core = cc.getCore(cname)) {
    if (core == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to locate core " + cname);
    }
    String indexDirPath = core.getIndexDir();
    IndexCommit ic = core.getDeletionPolicy().getLatestCommit();
    if (ic == null) {
      RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
      try {
        ic = searcher.get().getIndexReader().getIndexCommit();
      } finally {
        searcher.decref();
      }
    }
    SolrSnapshotMetaDataManager mgr = core.getSnapshotMetaDataManager();
    mgr.snapshot(commitName, indexDirPath, ic.getGeneration());
    it.rsp.add(CoreAdminParams.CORE, core.getName());
    it.rsp.add(CoreAdminParams.COMMIT_NAME, commitName);
    it.rsp.add(SolrSnapshotManager.INDEX_DIR_PATH, indexDirPath);
    it.rsp.add(SolrSnapshotManager.GENERATION_NUM, ic.getGeneration());
    it.rsp.add(SolrSnapshotManager.FILE_LIST, ic.getFileNames());
  }
}
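CreateSnapshotOp backs the CREATESNAPSHOT core admin action, so the operation above is normally triggered over HTTP. A minimal client-side sketch, assuming a locally running Solr; the base URL, core name, and commit name are all placeholders:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

// Hedged sketch: call /admin/cores?action=CREATESNAPSHOT for a core.
public static void createSnapshot() throws Exception {
  try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", "CREATESNAPSHOT");
    params.set("core", "techproducts");        // placeholder core name
    params.set("commitName", "mySnapshot");    // placeholder snapshot name
    NamedList<Object> rsp = client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", params));
    // CreateSnapshotOp echoes back the core name, commit name, index dir path,
    // generation number, and file list; print the whole response to inspect them.
    System.out.println(rsp);
  }
}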
Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
The class IndexReplicationHandler, method cleanupOldIndexFiles.
/**
 * Removes old index files from the index directory. This method uses the
 * last commit found by {@link #getLastCommit(Directory)}. If it matches the
 * expected segmentsFile, then all files not referenced by this commit point
 * are deleted.
 * <p>
 * <b>NOTE:</b> this method does a best effort attempt to clean the index
 * directory. It suppresses any exceptions that occur, as this can be retried
 * the next time.
 */
public static void cleanupOldIndexFiles(Directory dir, String segmentsFile, InfoStream infoStream) {
  try {
    IndexCommit commit = getLastCommit(dir);
    // if the directory has no commit, or the last commit does not match the
    // expected segments file (segments files mismatch), skip the cleanup
    if (commit != null && commit.getSegmentsFileName().equals(segmentsFile)) {
      Set<String> commitFiles = new HashSet<>();
      commitFiles.addAll(commit.getFileNames());
      Matcher matcher = IndexFileNames.CODEC_FILE_PATTERN.matcher("");
      for (String file : dir.listAll()) {
        if (!commitFiles.contains(file) && (matcher.reset(file).matches() || file.startsWith(IndexFileNames.SEGMENTS))) {
          // suppress exceptions, it's just a best effort
          IOUtils.deleteFilesIgnoringExceptions(dir, file);
        }
      }
    }
  } catch (Throwable t) {
    // best effort: log and suppress the error, this cleanup will get another
    // chance to succeed on the next revision
    if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
      infoStream.message(INFO_STREAM_COMPONENT, "cleanupOldIndexFiles(): failed on error " + t.getMessage());
    }
  }
}
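getLastCommit(Directory) is referenced by the javadoc but not shown above; the idea is a thin wrapper over Lucene's DirectoryReader.listCommits. A sketch of how such a helper might look — the method name matches the javadoc reference, but the body is a reconstruction, not the verbatim source:

import java.io.IOException;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.store.Directory;

// Sketch: return the newest commit in the directory, or null if there is none.
public static IndexCommit getLastCommit(Directory dir) throws IOException {
  try {
    if (DirectoryReader.indexExists(dir)) {
      // listCommits returns commits sorted by increasing generation,
      // so the last element is the most recent commit point
      List<IndexCommit> commits = DirectoryReader.listCommits(dir);
      return commits.get(commits.size() - 1);
    }
  } catch (IndexNotFoundException e) {
    // an empty or partially written directory has no commits; treat it as null
  }
  return null;
}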
Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
The class TestSolrCoreSnapshots, method testIndexOptimization.
@Test
public void testIndexOptimization() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "SolrCoreSnapshots_IndexOptimization";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String commitName = TestUtil.randomSimpleString(random(), 1, 5);
  try (SolrClient adminClient = getHttpSolrClient(cluster.getJettySolrRunners().get(0).getBaseUrl().toString());
       SolrClient masterClient = getHttpSolrClient(replica.getCoreUrl())) {
    SnapshotMetaData metaData = createSnapshot(adminClient, coreName, commitName);
    int numTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempt = 0; attempt < numTests; attempt++) {
      // Modify the existing index before we call optimize.
      if (nDocs > 0) {
        // Delete a few docs
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery("id:" + i);
        }
        // Add a few more
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(doc);
        }
        masterClient.commit();
      }
    }
    // Before invoking the optimize command, verify that the index directory contains the commit we snapshotted earlier.
    {
      Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      // Verify that at least one index commit is stored in this directory.
      assertTrue(commits.size() > 0);
      // Verify that the snapshot commit is present in this directory.
      assertTrue(commits.stream().anyMatch(x -> x.getGeneration() == metaData.getGenerationNumber()));
    }
    // Optimize the index.
    masterClient.optimize(true, true, 1);
    // After invoking the optimize command, verify that the index directory contains multiple commits (including the one we snapshotted earlier).
    {
      List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      // Verify that multiple index commits are stored in this directory.
      assertTrue(commits.size() > 1);
      // Verify that the snapshot commit is present in this directory.
      assertTrue(commits.stream().anyMatch(x -> x.getGeneration() == metaData.getGenerationNumber()));
    }
    // Delete the snapshot.
    deleteSnapshot(adminClient, coreName, metaData.getName());
    // Add a few documents. Without this the optimize command below does not take effect.
    {
      int moreAdds = TestUtil.nextInt(random(), 1, 100);
      for (int i = 0; i < moreAdds; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", i + nDocs);
        doc.addField("name", "name = " + (i + nDocs));
        masterClient.add(doc);
      }
      masterClient.commit();
    }
    // Optimize the index.
    masterClient.optimize(true, true, 1);
    // Verify that the index directory contains only one index commit (which is not the same as the snapshotted commit).
    Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
    assertEquals(1, commits.size());
    assertFalse(commits.stream().anyMatch(x -> x.getGeneration() == metaData.getGenerationNumber()));
  }
}
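The listCommits helper used throughout this test is not shown; it can be written as a thin wrapper over DirectoryReader.listCommits on the on-disk index directory. A sketch under that assumption (not the verbatim test helper):

import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.store.FSDirectory;

// Sketch of a listCommits helper: enumerate every commit point stored in the directory.
static List<IndexCommit> listCommits(String indexDirPath) throws Exception {
  try (FSDirectory dir = FSDirectory.open(Paths.get(indexDirPath))) {
    return DirectoryReader.listCommits(dir);
  } catch (IndexNotFoundException e) {
    // the directory may have been cleaned up already (e.g. after a snapshot was deleted)
    return Collections.emptyList();
  }
}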
Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
The class TestSolrDeletionPolicy1, method testKeepOptimizedOnlyCommits.
@Test
public void testKeepOptimizedOnlyCommits() {
  IndexDeletionPolicyWrapper delPolicy = h.getCore().getDeletionPolicy();
  addDocs();
  Map<Long, IndexCommit> commits = delPolicy.getCommits();
  IndexCommit latest = delPolicy.getLatestCommit();
  for (Long gen : commits.keySet()) {
    if (commits.get(gen) == latest)
      continue;
    assertEquals(1, commits.get(gen).getSegmentCount());
  }
}
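As the test name suggests, this relies on a deletion policy configured (in the test's solrconfig) to keep only optimized commits, so every retained commit other than the latest should be fully merged. The same check can be phrased as a small standalone helper; a sketch with a hypothetical method name:

import java.util.Map;

import org.apache.lucene.index.IndexCommit;
import org.apache.solr.core.IndexDeletionPolicyWrapper;

// Sketch: with a keep-optimized-only policy, every retained commit except the
// latest should consist of a single segment (i.e. was produced by an optimize).
static void assertOnlyOptimizedCommitsKept(IndexDeletionPolicyWrapper delPolicy) {
  IndexCommit latest = delPolicy.getLatestCommit();
  for (Map.Entry<Long, IndexCommit> entry : delPolicy.getCommits().entrySet()) {
    IndexCommit commit = entry.getValue();
    if (commit != latest && commit.getSegmentCount() != 1) {
      throw new AssertionError("non-optimized commit retained: generation=" + entry.getKey());
    }
  }
}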