Use of org.apache.lucene.index.LogDocMergePolicy in the project crate (by crate):
From the class InternalEngineTests, method testRenewSyncFlush:
@Test
public void testRenewSyncFlush() throws Exception {
// run this a couple of times to get some coverage
final int iters = randomIntBetween(2, 5);
for (int i = 0; i < iters; i++) {
try (Store store = createStore();
InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null))) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null));
engine.index(doc1);
assertEquals(engine.getLastWriteNanos(), doc1.startTime());
engine.flush();
Engine.Index doc2 = indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null));
engine.index(doc2);
assertEquals(engine.getLastWriteNanos(), doc2.startTime());
engine.flush();
final boolean forceMergeFlushes = randomBoolean();
final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null);
if (forceMergeFlushes) {
engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false, UNASSIGNED_SEQ_NO, 0));
} else {
engine.index(indexForDoc(parsedDoc3));
}
Engine.CommitId commitID = engine.flush();
assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS);
assertEquals(3, engine.segments(false).size());
engine.forceMerge(forceMergeFlushes, 1, false, false, false, UUIDs.randomBase64UUID());
if (forceMergeFlushes == false) {
engine.refresh("make all segments visible");
assertEquals(4, engine.segments(false).size());
assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertTrue(engine.tryRenewSyncCommit());
assertEquals(1, engine.segments(false).size());
} else {
engine.refresh("test");
assertBusy(() -> assertEquals(1, engine.segments(false).size()));
}
assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
if (randomBoolean()) {
Engine.Index doc4 = indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null));
engine.index(doc4);
assertEquals(engine.getLastWriteNanos(), doc4.startTime());
} else {
Engine.Delete delete = new Engine.Delete(doc1.id(), doc1.uid(), UNASSIGNED_SEQ_NO, primaryTerm.get(), Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
engine.delete(delete);
assertEquals(engine.getLastWriteNanos(), delete.startTime());
}
assertFalse(engine.tryRenewSyncCommit());
// we might hit a concurrent flush from a finishing merge here - just wait if ongoing...
engine.flush(false, true);
assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID));
assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
}
}
}
Use of org.apache.lucene.index.LogDocMergePolicy in the project zm-mailbox (by Zimbra):
From the class LuceneIndex, method getWriterConfig:
private IndexWriterConfig getWriterConfig() {
IndexWriterConfig config = new IndexWriterConfig(VERSION, mailbox.index.getAnalyzer());
config.setMergeScheduler(new MergeScheduler());
config.setMaxBufferedDocs(LC.zimbra_index_lucene_max_buffered_docs.intValue());
config.setRAMBufferSizeMB(LC.zimbra_index_lucene_ram_buffer_size_kb.intValue() / 1024.0);
if (LC.zimbra_index_lucene_merge_policy.booleanValue()) {
LogDocMergePolicy policy = new LogDocMergePolicy();
config.setMergePolicy(policy);
policy.setUseCompoundFile(LC.zimbra_index_lucene_use_compound_file.booleanValue());
policy.setMergeFactor(LC.zimbra_index_lucene_merge_factor.intValue());
policy.setMinMergeDocs(LC.zimbra_index_lucene_min_merge.intValue());
if (LC.zimbra_index_lucene_max_merge.intValue() != Integer.MAX_VALUE) {
policy.setMaxMergeDocs(LC.zimbra_index_lucene_max_merge.intValue());
}
} else {
LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy();
config.setMergePolicy(policy);
policy.setUseCompoundFile(LC.zimbra_index_lucene_use_compound_file.booleanValue());
policy.setMergeFactor(LC.zimbra_index_lucene_merge_factor.intValue());
policy.setMinMergeMB(LC.zimbra_index_lucene_min_merge.intValue() / 1024.0);
if (LC.zimbra_index_lucene_max_merge.intValue() != Integer.MAX_VALUE) {
policy.setMaxMergeMB(LC.zimbra_index_lucene_max_merge.intValue() / 1024.0);
}
}
return config;
}
Aggregations