Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
From the class TestSolrDeletionPolicy1, method testCommitAge:
@Test
public void testCommitAge() throws InterruptedException {
  assumeFalse("This test is not working on Windows (or maybe machines with only 2 CPUs)", Constants.WINDOWS);
  IndexDeletionPolicyWrapper delPolicy = h.getCore().getDeletionPolicy();
  addDocs();
  Map<Long, IndexCommit> commits = delPolicy.getCommits();
  IndexCommit ic = delPolicy.getLatestCommit();
  String agestr = ((SolrDeletionPolicy) (delPolicy.getWrappedDeletionPolicy())).getMaxCommitAge().replaceAll("[a-zA-Z]", "").replaceAll("-", "");
  long age = Long.parseLong(agestr);
  Thread.sleep(age);
  assertU(adoc("id", String.valueOf(6), "name", "name" + String.valueOf(6)));
  assertU(optimize());
  assertQ("return all docs", req("id:[0 TO 6]"), "*[count(//doc)=6]");
  commits = delPolicy.getCommits();
  assertTrue(!commits.containsKey(ic.getGeneration()));
}
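For reference, commit points like the one checked above can also be listed with plain Lucene, without Solr's IndexDeletionPolicyWrapper. A minimal, hedged sketch (the ListCommits class name and the index-path argument are placeholders, not part of the test); note that listCommits only returns the commits the configured deletion policy has kept:
import java.nio.file.Paths;
import java.util.List;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListCommits {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get(args[0]))) {
      // Only commits the configured IndexDeletionPolicy has kept are returned.
      List<IndexCommit> commits = DirectoryReader.listCommits(dir);
      for (IndexCommit commit : commits) {
        System.out.println("generation=" + commit.getGeneration()
            + " segmentsFile=" + commit.getSegmentsFileName()
            + " files=" + commit.getFileNames().size());
      }
    }
  }
}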
Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
From the class TestControlledRealTimeReopenThread, method testCRTReopen:
// Relies on wall clock time, so it can easily false-fail when the machine is otherwise busy:
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-5737")
// LUCENE-5461
public void testCRTReopen() throws Exception {
  //test behaving badly
  //should be high enough
  int maxStaleSecs = 20;
  //build crap data just to store it.
  String s = " abcdefghijklmnopqrstuvwxyz ";
  char[] chars = s.toCharArray();
  StringBuilder builder = new StringBuilder(2048);
  for (int i = 0; i < 2048; i++) {
    builder.append(chars[random().nextInt(chars.length)]);
  }
  String content = builder.toString();
  final SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  final Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir("nrt")), 5, 128);
  IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
  config.setCommitOnClose(true);
  config.setIndexDeletionPolicy(sdp);
  config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
  final IndexWriter iw = new IndexWriter(dir, config);
  SearcherManager sm = new SearcherManager(iw, new SearcherFactory());
  ControlledRealTimeReopenThread<IndexSearcher> controlledRealTimeReopenThread = new ControlledRealTimeReopenThread<>(iw, sm, maxStaleSecs, 0);
  controlledRealTimeReopenThread.setDaemon(true);
  controlledRealTimeReopenThread.start();
  List<Thread> commitThreads = new ArrayList<>();
  for (int i = 0; i < 500; i++) {
    if (i > 0 && i % 50 == 0) {
      Thread commitThread = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            iw.commit();
            IndexCommit ic = sdp.snapshot();
            for (String name : ic.getFileNames()) {
              //distribute, and backup
              //System.out.println(names);
              assertTrue(slowFileExists(dir, name));
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      });
      commitThread.start();
      commitThreads.add(commitThread);
    }
    Document d = new Document();
    d.add(new TextField("count", i + "", Field.Store.NO));
    d.add(new TextField("content", content, Field.Store.YES));
    long start = System.currentTimeMillis();
    long l = iw.addDocument(d);
    controlledRealTimeReopenThread.waitForGeneration(l);
    long wait = System.currentTimeMillis() - start;
    assertTrue("waited too long for generation " + wait, wait < (maxStaleSecs * 1000));
    IndexSearcher searcher = sm.acquire();
    TopDocs td = searcher.search(new TermQuery(new Term("count", i + "")), 10);
    sm.release(searcher);
    assertEquals(1, td.totalHits);
  }
  for (Thread commitThread : commitThreads) {
    commitThread.join();
  }
  controlledRealTimeReopenThread.close();
  sm.close();
  iw.close();
  dir.close();
}
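The commit threads above snapshot a commit and only verify that its files exist; the usual next step is to copy those files somewhere safe and then release the snapshot. A hedged sketch of that pattern with plain Lucene (the CommitBackup class, method name, paths, and analyzer are illustrative assumptions, not part of the test):
import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;

public final class CommitBackup {
  // Pin the latest commit, copy its files, then release it so the writer can clean up.
  public static void backupLatestCommit(Path indexPath, Path backupPath) throws IOException {
    SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriterConfig cfg = new IndexWriterConfig(new StandardAnalyzer()).setIndexDeletionPolicy(sdp);
    try (Directory dir = FSDirectory.open(indexPath);
         Directory backup = FSDirectory.open(backupPath);
         IndexWriter writer = new IndexWriter(dir, cfg)) {
      writer.commit();                       // make sure there is at least one commit to snapshot
      IndexCommit snapshot = sdp.snapshot(); // files of this commit are protected while the snapshot is held
      try {
        for (String file : snapshot.getFileNames()) {
          backup.copyFrom(dir, file, file, IOContext.DEFAULT);
        }
      } finally {
        sdp.release(snapshot);               // allow the deletion policy to reclaim the files
        writer.deleteUnusedFiles();
      }
    }
  }
}
Releasing the snapshot matters: until release() is called, the deletion policy must keep every file of that commit on disk, even as the writer keeps committing.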
Use of org.apache.lucene.index.IndexCommit in project SearchServices by Alfresco.
From the class SolrInformationServer, method getCoreStats:
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public Iterable<Entry<String, Object>> getCoreStats() throws IOException {
  // This is still local, not totally cloud-friendly
  // TODO Make this cloud-friendly by aggregating the stats across the cloud
  SolrQueryRequest request = null;
  NamedList<Object> coreSummary = new SimpleOrderedMap<Object>();
  RefCounted<SolrIndexSearcher> refCounted = null;
  try {
    request = getLocalSolrQueryRequest();
    NamedList docTypeCounts = this.getFacets(request, "*:*", FIELD_DOC_TYPE, 0);
    long aclCount = getSafeCount(docTypeCounts, DOC_TYPE_ACL);
    coreSummary.add("Alfresco Acls in Index", aclCount);
    long nodeCount = getSafeCount(docTypeCounts, DOC_TYPE_NODE);
    coreSummary.add("Alfresco Nodes in Index", nodeCount);
    long txCount = getSafeCount(docTypeCounts, DOC_TYPE_TX);
    coreSummary.add("Alfresco Transactions in Index", txCount);
    long aclTxCount = getSafeCount(docTypeCounts, DOC_TYPE_ACL_TX);
    coreSummary.add("Alfresco Acl Transactions in Index", aclTxCount);
    long stateCount = getSafeCount(docTypeCounts, DOC_TYPE_STATE);
    coreSummary.add("Alfresco States in Index", stateCount);
    long unindexedNodeCount = getSafeCount(docTypeCounts, DOC_TYPE_UNINDEXED_NODE);
    coreSummary.add("Alfresco Unindexed Nodes", unindexedNodeCount);
    long errorNodeCount = getSafeCount(docTypeCounts, DOC_TYPE_ERROR_NODE);
    coreSummary.add("Alfresco Error Nodes in Index", errorNodeCount);
    refCounted = core.getSearcher(false, true, null);
    SolrIndexSearcher solrIndexSearcher = refCounted.get();
    coreSummary.add("Searcher", solrIndexSearcher.getStatistics());
    Map<String, SolrInfoMBean> infoRegistry = core.getInfoRegistry();
    for (String key : infoRegistry.keySet()) {
      SolrInfoMBean infoMBean = infoRegistry.get(key);
      if (key.equals("/alfresco")) {
        // TODO Do we really need to fixStats in solr4?
        coreSummary.add("/alfresco", fixStats(infoMBean.getStatistics()));
      }
      if (key.equals("/afts")) {
        coreSummary.add("/afts", fixStats(infoMBean.getStatistics()));
      }
      if (key.equals("/cmis")) {
        coreSummary.add("/cmis", fixStats(infoMBean.getStatistics()));
      }
      if (key.equals("filterCache")) {
        coreSummary.add("/filterCache", infoMBean.getStatistics());
      }
      if (key.equals("queryResultCache")) {
        coreSummary.add("/queryResultCache", infoMBean.getStatistics());
      }
      if (key.equals("alfrescoAuthorityCache")) {
        coreSummary.add("/alfrescoAuthorityCache", infoMBean.getStatistics());
      }
      if (key.equals("alfrescoPathCache")) {
        coreSummary.add("/alfrescoPathCache", infoMBean.getStatistics());
      }
    }
    // Adds detailed stats for each registered searcher
    int searcherIndex = 0;
    List<SolrIndexSearcher> searchers = getRegisteredSearchers();
    for (SolrIndexSearcher searcher : searchers) {
      NamedList<Object> details = new SimpleOrderedMap<Object>();
      details.add("Searcher", searcher.getStatistics());
      coreSummary.add("Searcher-" + searcherIndex, details);
      searcherIndex++;
    }
    coreSummary.add("Number of Searchers", searchers.size());
    // This is zero for Solr4, whereas we had some local caches before
    coreSummary.add("Total Searcher Cache (GB)", 0);
    IndexDeletionPolicyWrapper delPolicy = core.getDeletionPolicy();
    IndexCommit indexCommit = delPolicy.getLatestCommit();
    // race?
    if (indexCommit == null) {
      indexCommit = solrIndexSearcher.getIndexReader().getIndexCommit();
    }
    if (indexCommit != null) {
      // Tells Solr to stop deleting things for 20 seconds so we can get a snapshot of all the files on the index
      delPolicy.setReserveDuration(solrIndexSearcher.getIndexReader().getVersion(), 20000);
      Long fileSize = 0L;
      File dir = new File(solrIndexSearcher.getPath());
      for (String name : indexCommit.getFileNames()) {
        File file = new File(dir, name);
        if (file.exists()) {
          fileSize += file.length();
        }
      }
      DecimalFormat df = new DecimalFormat("###,###.######");
      coreSummary.add("On disk (GB)", df.format(fileSize / 1024.0f / 1024.0f / 1024.0f));
      coreSummary.add("Per node B", nodeCount > 0 ? fileSize / nodeCount : 0);
    }
  } finally {
    if (request != null) {
      request.close();
    }
    if (refCounted != null) {
      refCounted.decref();
    }
  }
  return coreSummary;
}
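The size loop above resolves each commit file through java.io.File. A hedged alternative sketch using only the Lucene Directory API (Directory.fileLength), which also works when the index is not on a plain filesystem; the CommitSize class name is illustrative and the Solr reserve/stats plumbing from the example is omitted:
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.List;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;

public final class CommitSize {
  // Sum the sizes of all files referenced by the newest commit and format the total as GB.
  public static String onDiskGb(Directory dir) throws IOException {
    List<IndexCommit> commits = DirectoryReader.listCommits(dir);
    IndexCommit latest = commits.get(commits.size() - 1); // listCommits returns oldest first
    long bytes = 0L;
    for (String name : latest.getFileNames()) {
      bytes += dir.fileLength(name);
    }
    return new DecimalFormat("###,###.######").format(bytes / 1024.0 / 1024.0 / 1024.0);
  }
}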
Use of org.apache.lucene.index.IndexCommit in project crate by crate.
From the class InternalEngine, method acquireLastIndexCommit:
@Override
public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws EngineException {
  // we have to flush outside of the read lock, otherwise we might have a problem
  // upgrading to a write lock when we fail the engine in this operation
  if (flushFirst) {
    logger.trace("start flush for snapshot");
    flush(false, true);
    logger.trace("finish flush for snapshot");
  }
  final IndexCommit lastCommit = combinedDeletionPolicy.acquireIndexCommit(false);
  return new Engine.IndexCommitRef(lastCommit, () -> releaseIndexCommit(lastCommit));
}
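Engine.IndexCommitRef pairs the acquired commit with a release callback, so the caller cannot forget to give the commit back. A hedged, generic sketch of the same idea built on SnapshotDeletionPolicy (the CommitRef class is hypothetical, not Crate's type, and writer.commit() only approximates what the engine's flush does):
import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SnapshotDeletionPolicy;

final class CommitRef implements Closeable {
  private final IndexCommit commit;
  private final SnapshotDeletionPolicy policy;

  private CommitRef(IndexCommit commit, SnapshotDeletionPolicy policy) {
    this.commit = commit;
    this.policy = policy;
  }

  // Optionally commit first (a rough stand-in for the engine's flush), then pin the last commit.
  static CommitRef acquireLast(IndexWriter writer, SnapshotDeletionPolicy policy, boolean flushFirst) throws IOException {
    if (flushFirst) {
      writer.commit();
    }
    return new CommitRef(policy.snapshot(), policy);
  }

  IndexCommit commit() {
    return commit;
  }

  @Override
  public void close() throws IOException {
    policy.release(commit); // release so the deletion policy can reclaim the commit's files
  }
}
Callers would typically use it with try-with-resources, e.g. try (CommitRef ref = CommitRef.acquireLast(writer, sdp, true)) { copy ref.commit().getFileNames(); }, so the release happens even if the copy fails.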
Use of org.apache.lucene.index.IndexCommit in project crate by crate.
From the class NoOpEngine, method trimUnreferencedTranslogFiles:
/**
 * This implementation will trim existing translog files using a {@link TranslogDeletionPolicy}
 * that retains nothing but the last translog generation from safe commit.
 */
@Override
public void trimUnreferencedTranslogFiles() {
  final Store store = this.engineConfig.getStore();
  store.incRef();
  try (ReleasableLock lock = readLock.acquire()) {
    ensureOpen();
    final List<IndexCommit> commits = DirectoryReader.listCommits(store.directory());
    if (commits.size() == 1 && translogStats.getTranslogSizeInBytes() > translogStats.getUncommittedSizeInBytes()) {
      final Map<String, String> commitUserData = getLastCommittedSegmentInfos().getUserData();
      final String translogUuid = commitUserData.get(Translog.TRANSLOG_UUID_KEY);
      if (translogUuid == null) {
        throw new IllegalStateException("commit doesn't contain translog unique id");
      }
      final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
      final long localCheckpoint = Long.parseLong(commitUserData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
      final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy(-1, -1, 0);
      translogDeletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint);
      try (Translog translog = new Translog(translogConfig, translogUuid, translogDeletionPolicy,
          engineConfig.getGlobalCheckpointSupplier(), engineConfig.getPrimaryTermSupplier(), seqNo -> {
      })) {
        translog.trimUnreferencedReaders();
        // refresh the translog stats
        this.translogStats = translog.stats();
        assert translog.currentFileGeneration() == translog.getMinFileGeneration() : "translog was not trimmed "
            + " current gen " + translog.currentFileGeneration() + " != min gen " + translog.getMinFileGeneration();
      }
    }
  } catch (final Exception e) {
    try {
      failEngine("translog trimming failed", e);
    } catch (Exception inner) {
      e.addSuppressed(inner);
    }
    throw new EngineException(shardId, "failed to trim translog", e);
  } finally {
    store.decRef();
  }
}
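The translog UUID above comes from the commit's user data, which at the Lucene level is simply a Map<String, String> attached to each commit point. A hedged sketch of reading it from the newest commit (the CommitUserData class and the "translog_uuid" key name are assumptions for illustration only):
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;

public final class CommitUserData {
  // Read a value out of the user data map stored with the newest commit point.
  public static String translogUuid(Directory dir) throws IOException {
    List<IndexCommit> commits = DirectoryReader.listCommits(dir);
    IndexCommit last = commits.get(commits.size() - 1);
    Map<String, String> userData = last.getUserData();
    String uuid = userData.get("translog_uuid"); // key name is an assumption, for illustration only
    if (uuid == null) {
      throw new IllegalStateException("commit doesn't contain translog unique id");
    }
    return uuid;
  }
}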