Search in sources :

Example 11 with LogDocMergePolicy

Use of org.apache.lucene.index.LogDocMergePolicy in the crate project (crate/crate).

From class InternalEngineTests, method testRenewSyncFlush.

@Test
public void testRenewSyncFlush() throws Exception {
    // run this a couple of times to get some coverage
    final int iters = randomIntBetween(2, 5);
    for (int i = 0; i < iters; i++) {
        // LogDocMergePolicy gives deterministic, doc-count-based merging so the
        // segment counts asserted below are predictable.
        try (Store store = createStore();
            InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null))) {
            final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
            // Index two docs with a flush after each, producing one segment per doc.
            Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null));
            engine.index(doc1);
            assertEquals(engine.getLastWriteNanos(), doc1.startTime());
            engine.flush();
            Engine.Index doc2 = indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null));
            engine.index(doc2);
            assertEquals(engine.getLastWriteNanos(), doc2.startTime());
            engine.flush();
            final boolean forceMergeFlushes = randomBoolean();
            final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null);
            if (forceMergeFlushes) {
                // Backdate the op's start time beyond the flush-merges-after threshold so
                // the subsequent forceMerge triggers an automatic flush.
                engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false, UNASSIGNED_SEQ_NO, 0));
            } else {
                engine.index(indexForDoc(parsedDoc3));
            }
            // A synced flush with the current commit id must succeed while no ops are pending.
            Engine.CommitId commitID = engine.flush();
            assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS);
            assertEquals(3, engine.segments(false).size());
            engine.forceMerge(forceMergeFlushes, 1, false, false, false, UUIDs.randomBase64UUID());
            if (forceMergeFlushes == false) {
                engine.refresh("make all segments visible");
                // NOTE(review): 4 segments presumably because the merged segment is visible
                // alongside the not-yet-dropped originals before the renew — confirm.
                assertEquals(4, engine.segments(false).size());
                // The sync id must survive the merge in both the on-disk and in-memory commit user data ...
                assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
                assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
                // ... and renewing the sync commit succeeds because no new ops were indexed.
                assertTrue(engine.tryRenewSyncCommit());
                assertEquals(1, engine.segments(false).size());
            } else {
                engine.refresh("test");
                // The merge-triggered flush happens asynchronously; wait for the single segment.
                assertBusy(() -> assertEquals(1, engine.segments(false).size()));
            }
            assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
            assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
            // Any new write (index or delete) invalidates the sync commit, so renewal must fail.
            if (randomBoolean()) {
                Engine.Index doc4 = indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null));
                engine.index(doc4);
                assertEquals(engine.getLastWriteNanos(), doc4.startTime());
            } else {
                Engine.Delete delete = new Engine.Delete(doc1.id(), doc1.uid(), UNASSIGNED_SEQ_NO, primaryTerm.get(), Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
                engine.delete(delete);
                assertEquals(engine.getLastWriteNanos(), delete.startTime());
            }
            assertFalse(engine.tryRenewSyncCommit());
            // we might hit a concurrent flush from a finishing merge here - just wait if ongoing...
            engine.flush(false, true);
            // After a regular flush the sync id must be gone from both commit views.
            assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID));
            assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
        }
    }
}
Also used : ParsedDocument(org.elasticsearch.index.mapper.ParsedDocument) Store(org.elasticsearch.index.store.Store) LogDocMergePolicy(org.apache.lucene.index.LogDocMergePolicy) Matchers.containsString(org.hamcrest.Matchers.containsString) LongPoint(org.apache.lucene.document.LongPoint) Test(org.junit.Test)

Example 12 with LogDocMergePolicy

Use of org.apache.lucene.index.LogDocMergePolicy in the zm-mailbox project (Zimbra).

From class LuceneIndex, method getWriterConfig.

/**
 * Builds the {@link IndexWriterConfig} for this mailbox's Lucene index.
 * <p>
 * Buffering limits come from local config (LC). The merge policy is chosen by
 * {@code zimbra_index_lucene_merge_policy}: {@code true} selects a doc-count
 * based {@link LogDocMergePolicy}, {@code false} a byte-size based
 * {@link LogByteSizeMergePolicy}. In the size-based branch the same LC min/max
 * values are reinterpreted as KB and converted to MB. A max-merge value of
 * {@code Integer.MAX_VALUE} means "no cap" and is left at the policy default.
 *
 * @return a fully configured writer config for {@code IndexWriter} creation
 */
private IndexWriterConfig getWriterConfig() {
    final IndexWriterConfig writerConfig = new IndexWriterConfig(VERSION, mailbox.index.getAnalyzer());
    writerConfig.setMergeScheduler(new MergeScheduler());
    writerConfig.setMaxBufferedDocs(LC.zimbra_index_lucene_max_buffered_docs.intValue());
    // LC value is in KB; the Lucene setter expects MB.
    writerConfig.setRAMBufferSizeMB(LC.zimbra_index_lucene_ram_buffer_size_kb.intValue() / 1024.0);

    // Read the shared tuning knobs once; both branches use the same four values.
    final boolean useCompoundFile = LC.zimbra_index_lucene_use_compound_file.booleanValue();
    final int mergeFactor = LC.zimbra_index_lucene_merge_factor.intValue();
    final int minMerge = LC.zimbra_index_lucene_min_merge.intValue();
    final int maxMerge = LC.zimbra_index_lucene_max_merge.intValue();

    if (LC.zimbra_index_lucene_merge_policy.booleanValue()) {
        // Doc-count based merging: thresholds are document counts.
        final LogDocMergePolicy docPolicy = new LogDocMergePolicy();
        writerConfig.setMergePolicy(docPolicy);
        docPolicy.setUseCompoundFile(useCompoundFile);
        docPolicy.setMergeFactor(mergeFactor);
        docPolicy.setMinMergeDocs(minMerge);
        if (maxMerge != Integer.MAX_VALUE) {
            docPolicy.setMaxMergeDocs(maxMerge);
        }
    } else {
        // Byte-size based merging: LC min/max are treated as KB and scaled to MB.
        final LogByteSizeMergePolicy sizePolicy = new LogByteSizeMergePolicy();
        writerConfig.setMergePolicy(sizePolicy);
        sizePolicy.setUseCompoundFile(useCompoundFile);
        sizePolicy.setMergeFactor(mergeFactor);
        sizePolicy.setMinMergeMB(minMerge / 1024.0);
        if (maxMerge != Integer.MAX_VALUE) {
            sizePolicy.setMaxMergeMB(maxMerge / 1024.0);
        }
    }
    return writerConfig;
}
Also used : LogByteSizeMergePolicy(org.apache.lucene.index.LogByteSizeMergePolicy) LogDocMergePolicy(org.apache.lucene.index.LogDocMergePolicy) SerialMergeScheduler(org.apache.lucene.index.SerialMergeScheduler) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Aggregations

LogDocMergePolicy (org.apache.lucene.index.LogDocMergePolicy)12 LongPoint (org.apache.lucene.document.LongPoint)4 LogMergePolicy (org.apache.lucene.index.LogMergePolicy)4 IndexReader (org.apache.lucene.index.IndexReader)3 RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter)3 MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer)2 Document (org.apache.lucene.document.Document)2 IndexWriter (org.apache.lucene.index.IndexWriter)2 IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)2 LogByteSizeMergePolicy (org.apache.lucene.index.LogByteSizeMergePolicy)2 ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument)2 Store (org.elasticsearch.index.store.Store)2 Matchers.containsString (org.hamcrest.Matchers.containsString)2 Test (org.junit.Test)2 CommitMitigatingTieredMergePolicy (org.apache.jackrabbit.oak.plugins.index.lucene.writer.CommitMitigatingTieredMergePolicy)1 DoublePoint (org.apache.lucene.document.DoublePoint)1 FloatPoint (org.apache.lucene.document.FloatPoint)1 IntPoint (org.apache.lucene.document.IntPoint)1 LeafReader (org.apache.lucene.index.LeafReader)1 LeafReaderContext (org.apache.lucene.index.LeafReaderContext)1