Use of org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy in project elasticsearch (by elastic):
class StoreTests, method testUserDataRead.
/**
 * Verifies that commit user data written via {@code IndexWriter.setCommitData}
 * is exposed through {@code Store.MetadataSnapshot#getCommitUserData()}, both
 * for the latest commit and for an explicitly snapshotted commit.
 */
public void testUserDataRead() throws IOException {
    final ShardId id = new ShardId("index", "_na_", 1);
    final DirectoryService dirService = new LuceneManagedDirectoryService(random());
    final Store store = new Store(id, INDEX_SETTINGS, dirService, new DummyShardLock(id));
    // snapshot policy so we can optionally read metadata from a pinned commit below
    final SnapshotDeletionPolicy snapshotPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    final IndexWriterConfig iwc = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
    iwc.setIndexDeletionPolicy(snapshotPolicy);
    final IndexWriter writer = new IndexWriter(store.directory(), iwc);
    final Document document = new Document();
    document.add(new TextField("id", "1", Field.Store.NO));
    writer.addDocument(document);
    final String syncId = "a sync id";
    final String translogId = "a translog id";
    final Map<String, String> userData = new HashMap<>(2);
    userData.put(Engine.SYNC_COMMIT_ID, syncId);
    userData.put(Translog.TRANSLOG_GENERATION_KEY, translogId);
    writer.setCommitData(userData);
    writer.commit();
    writer.close();
    // randomly read either from the last commit (null) or from the pinned snapshot
    final Store.MetadataSnapshot snapshot = store.getMetadata(randomBoolean() ? null : snapshotPolicy.snapshot());
    assertFalse(snapshot.asMap().isEmpty());
    // correctness of the file listing is covered by the tests above; only user data matters here
    assertThat(snapshot.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
    assertThat(snapshot.getCommitUserData().get(Translog.TRANSLOG_GENERATION_KEY), equalTo(translogId));
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, dirService);
    IOUtils.close(store);
}
Use of org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy in project lucene-solr (by apache):
class TestControlledRealTimeReopenThread, method testCRTReopen.
// Relies on wall clock time, so it can easily false-fail when the machine is otherwise busy:
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-5737")
public // LUCENE-5461
void testCRTReopen() throws Exception {
//test behaving badly
// upper bound (seconds) on how stale the reopen thread may let searchers get;
// should be high enough to avoid spurious failures on a busy machine
int maxStaleSecs = 20;
//build crap data just to store it: 2048 chars sampled from the alphabet below
String s = " abcdefghijklmnopqrstuvwxyz ";
char[] chars = s.toCharArray();
StringBuilder builder = new StringBuilder(2048);
for (int i = 0; i < 2048; i++) {
builder.append(chars[random().nextInt(chars.length)]);
}
String content = builder.toString();
// snapshot policy lets the commit threads below pin a commit while checking its files
final SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
final Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir("nrt")), 5, 128);
IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
config.setCommitOnClose(true);
config.setIndexDeletionPolicy(sdp);
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
final IndexWriter iw = new IndexWriter(dir, config);
SearcherManager sm = new SearcherManager(iw, new SearcherFactory());
// reopen thread: target staleness at most maxStaleSecs, no minimum staleness (0)
ControlledRealTimeReopenThread<IndexSearcher> controlledRealTimeReopenThread = new ControlledRealTimeReopenThread<>(iw, sm, maxStaleSecs, 0);
controlledRealTimeReopenThread.setDaemon(true);
controlledRealTimeReopenThread.start();
List<Thread> commitThreads = new ArrayList<>();
// index 500 docs; every 50th iteration fork a thread that commits + snapshots concurrently
for (int i = 0; i < 500; i++) {
if (i > 0 && i % 50 == 0) {
Thread commitThread = new Thread(new Runnable() {
@Override
public void run() {
try {
iw.commit();
IndexCommit ic = sdp.snapshot();
// every file referenced by the pinned commit must still exist in the directory
for (String name : ic.getFileNames()) {
//distribute, and backup
//System.out.println(names);
assertTrue(slowFileExists(dir, name));
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
});
commitThread.start();
commitThreads.add(commitThread);
}
Document d = new Document();
d.add(new TextField("count", i + "", Field.Store.NO));
d.add(new TextField("content", content, Field.Store.YES));
long start = System.currentTimeMillis();
long l = iw.addDocument(d);
// block until the reopen thread has made this document's generation searchable
controlledRealTimeReopenThread.waitForGeneration(l);
long wait = System.currentTimeMillis() - start;
// the wait must stay under the configured maximum staleness (wall-clock sensitive)
assertTrue("waited too long for generation " + wait, wait < (maxStaleSecs * 1000));
IndexSearcher searcher = sm.acquire();
TopDocs td = searcher.search(new TermQuery(new Term("count", i + "")), 10);
sm.release(searcher);
// the just-added doc must be visible exactly once
assertEquals(1, td.totalHits);
}
// wait for all background commit threads before tearing anything down
for (Thread commitThread : commitThreads) {
commitThread.join();
}
controlledRealTimeReopenThread.close();
sm.close();
iw.close();
dir.close();
}
Use of org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy in project lucene-solr (by apache):
class IndexRevisionTest, method testNoSnapshotDeletionPolicy.
/**
 * Verifies that constructing an {@code IndexRevision} over a writer whose
 * deletion policy is not a {@code SnapshotDeletionPolicy} is rejected with
 * an {@code IllegalArgumentException}.
 */
@Test
public void testNoSnapshotDeletionPolicy() throws Exception {
    final Directory directory = newDirectory();
    final IndexWriterConfig iwConf = new IndexWriterConfig(null);
    iwConf.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    final IndexWriter indexWriter = new IndexWriter(directory, iwConf);
    // should fail when IndexDeletionPolicy is not Snapshot
    expectThrows(IllegalArgumentException.class, () -> new IndexRevision(indexWriter));
    indexWriter.close();
    IOUtils.close(directory);
}
Use of org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy in project elasticsearch (by elastic):
class RefreshListenersTests, method setupListeners.
// Builds a real (non-mocked) InternalEngine wired to the RefreshListeners under test.
@Before
public void setupListeners() throws Exception {
// Setup dependencies of the listeners
maxListeners = randomIntBetween(1, 1000);
listeners = new RefreshListeners(() -> maxListeners, () -> engine.refresh("too-many-listeners"), // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test.
Runnable::run, logger);
// Now setup the InternalEngine which is much more complicated because we aren't mocking anything
threadPool = new TestThreadPool(getTestName());
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
Directory directory = newDirectory();
// DirectoryService stub that always hands back the single test directory created above
DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
@Override
public Directory newDirectory() throws IOException {
return directory;
}
};
store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
// Engine failures are irrelevant to these tests, so the listener is a no-op
Engine.EventListener eventListener = new Engine.EventListener() {
@Override
public void onFailedEngine(String reason, @Nullable Exception e) {
// we don't need to notify anybody in this test
}
};
TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), shardId.getIndexName(), logger);
// Wire everything into an EngineConfig; `listeners` is passed in so the engine
// notifies it on refresh. NOTE: argument order of this constructor is positional
// and must match EngineConfig exactly.
EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool, indexSettings, null, store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, translogHandler, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
engine = new InternalEngine(config);
// the listeners need the engine's translog to resolve refresh locations
listeners.setTranslog(engine.getTranslog());
}
Use of org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy in project jackrabbit (by apache):
class IndexMigration, method migrate.
/**
 * Checks if the given <code>index</code> needs to be migrated from a
 * pre-Jackrabbit-1.5 format (IndexFormatVersion &lt; V3) and, if so, rewrites
 * it into a scratch directory with the old separator char replaced, then
 * swaps the migrated directory in place of the original.
 *
 * @param index the index to check and migrate if needed.
 * @param directoryManager the directory manager.
 * @param oldSeparatorChar the old separator char that needs to be replaced.
 * @throws IOException if an error occurs while migrating the index.
 */
public static void migrate(PersistentIndex index, DirectoryManager directoryManager, char oldSeparatorChar) throws IOException {
Directory indexDir = index.getDirectory();
log.debug("Checking {} ...", indexDir);
ReadOnlyIndexReader reader = index.getReadOnlyIndexReader();
try {
if (IndexFormatVersion.getVersion(reader).getVersion() >= IndexFormatVersion.V3.getVersion()) {
// index was created with Jackrabbit 1.5 or higher
// no need for migration
log.debug("IndexFormatVersion >= V3, no migration needed");
return;
}
// assert: there is at least one node in the index, otherwise the
// index format version would be at least V3
TermEnum terms = reader.terms(new Term(FieldNames.PROPERTIES, ""));
try {
Term t = terms.term();
// if the first PROPERTIES term no longer contains the old separator,
// a previous run already migrated this index
if (t.text().indexOf(oldSeparatorChar) == -1) {
log.debug("Index already migrated");
return;
}
} finally {
terms.close();
}
} finally {
// release reader and writer on every path, including the early returns above
reader.release();
index.releaseWriterAndReaders();
}
// if we get here then the index must be migrated
log.debug("Index requires migration {}", indexDir);
// migrate into a scratch directory first; only swap it in once the rewrite succeeds
String migrationName = index.getName() + "_v36";
if (directoryManager.hasDirectory(migrationName)) {
// leftover from an earlier, failed migration attempt
directoryManager.delete(migrationName);
}
Directory migrationDir = directoryManager.getDirectory(migrationName);
final IndexWriterConfig c = new IndexWriterConfig(Version.LUCENE_36, new JackrabbitAnalyzer());
// UpgradeIndexMergePolicy rewrites old-format segments into the LUCENE_36 format
c.setMergePolicy(new UpgradeIndexMergePolicy(new LogByteSizeMergePolicy()));
c.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
try {
IndexWriter writer = new IndexWriter(migrationDir, c);
try {
// MigrationIndexReader rewrites terms on the fly, replacing oldSeparatorChar
IndexReader r = new MigrationIndexReader(IndexReader.open(index.getDirectory()), oldSeparatorChar);
try {
writer.addIndexes(r);
writer.forceMerge(1);
writer.close();
} finally {
r.close();
}
} finally {
// NOTE(review): writer.close() may already have run above; this second close
// looks like a defensive no-op for the exception path — confirm close() is
// idempotent in this Lucene version.
writer.close();
}
} finally {
migrationDir.close();
}
// swap the migrated directory in place of the original
directoryManager.delete(index.getName());
if (!directoryManager.rename(migrationName, index.getName())) {
throw new IOException("failed to move migrated directory " + migrationDir);
}
log.info("Migrated " + index.getName());
}
Aggregations