
Example 1 with RetentionLease

Use of org.opensearch.index.seqno.RetentionLease in project OpenSearch by opensearch-project.

From the class ReplicaShardAllocatorIT, the method testRecentPrimaryInformation:

/**
 * Ensure that we fetch the latest shard store from the primary when a new node joins so we won't cancel the current recovery
 * for the copy on the newly joined node unless we can perform a noop recovery with that node.
 */
public void testRecentPrimaryInformation() throws Exception {
    String indexName = "test";
    String nodeWithPrimary = internalCluster().startNode();
    assertAcked(
        client().admin().indices().prepareCreate(indexName)
            .setSettings(Settings.builder()
                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 0.1f)
                .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")
                .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "1ms")));
    String nodeWithReplica = internalCluster().startDataOnlyNode();
    DiscoveryNode discoNodeWithReplica = internalCluster().getInstance(ClusterService.class, nodeWithReplica).localNode();
    Settings nodeWithReplicaSettings = internalCluster().dataPathSettings(nodeWithReplica);
    ensureGreen(indexName);
    indexRandom(
        randomBoolean(),
        false,
        randomBoolean(),
        IntStream.range(0, between(10, 100))
            .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v"))
            .collect(Collectors.toList()));
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica));
    if (randomBoolean()) {
        indexRandom(
            randomBoolean(),
            false,
            randomBoolean(),
            IntStream.range(0, between(10, 100))
                .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v"))
                .collect(Collectors.toList()));
    }
    CountDownLatch blockRecovery = new CountDownLatch(1);
    CountDownLatch recoveryStarted = new CountDownLatch(1);
    MockTransportService transportServiceOnPrimary = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary);
    transportServiceOnPrimary.addSendBehavior((connection, requestId, action, request, options) -> {
        if (PeerRecoveryTargetService.Actions.FILES_INFO.equals(action)) {
            recoveryStarted.countDown();
            try {
                blockRecovery.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    String newNode = internalCluster().startDataOnlyNode();
    recoveryStarted.await();
    // Index more documents and flush to destroy the sync_id and remove the retention lease (as the file_based_recovery_threshold is reached).
    indexRandom(
        randomBoolean(),
        randomBoolean(),
        randomBoolean(),
        IntStream.range(0, between(50, 200))
            .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v"))
            .collect(Collectors.toList()));
    client().admin().indices().prepareFlush(indexName).get();
    assertBusy(() -> {
        for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getShards()) {
            for (RetentionLease lease : shardStats.getRetentionLeaseStats().retentionLeases().leases()) {
                assertThat(lease.id(), not(equalTo(ReplicationTracker.getPeerRecoveryRetentionLeaseId(discoNodeWithReplica.getId()))));
            }
        }
    });
    // AllocationService only calls GatewayAllocator if there are unassigned shards
    assertAcked(
        client().admin().indices().prepareCreate("dummy-index")
            .setWaitForActiveShards(0)
            .setSettings(Settings.builder().put("index.routing.allocation.require.attr", "not-found")));
    internalCluster().startDataOnlyNode(nodeWithReplicaSettings);
    // We need to wait for events to ensure the reroute has happened, since we perform it asynchronously when a new node joins.
    client().admin().cluster().prepareHealth(indexName).setWaitForYellowStatus().setWaitForEvents(Priority.LANGUID).get();
    blockRecovery.countDown();
    ensureGreen(indexName);
    assertThat(internalCluster().nodesInclude(indexName), hasItem(newNode));
    for (RecoveryState recovery : client().admin().indices().prepareRecoveries(indexName).get().shardRecoveryStates().get(indexName)) {
        if (recovery.getPrimary() == false) {
            assertThat(recovery.getIndex().fileDetails(), not(empty()));
        }
    }
    transportServiceOnPrimary.clearAllRules();
}
Also used : ShardStats(org.opensearch.action.admin.indices.stats.ShardStats) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) ClusterService(org.opensearch.cluster.service.ClusterService) MockTransportService(org.opensearch.test.transport.MockTransportService) TransportService(org.opensearch.transport.TransportService) RetentionLease(org.opensearch.index.seqno.RetentionLease) CountDownLatch(java.util.concurrent.CountDownLatch) RecoveryState(org.opensearch.indices.recovery.RecoveryState) Settings(org.opensearch.common.settings.Settings) IndexSettings(org.opensearch.index.IndexSettings)
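
A minimal sketch of the lease-id relationship the assertion above relies on: the peer-recovery retention lease id for a shard copy is derived from the node id via ReplicationTracker.getPeerRecoveryRetentionLeaseId, and the test checks that no lease with that id survives for the stopped replica. The node id and the "peer recovery" source string below are illustrative placeholders, not values taken from the test.

import org.opensearch.index.seqno.ReplicationTracker;
import org.opensearch.index.seqno.RetentionLease;

public class PeerRecoveryLeaseIdSketch {
    public static void main(String[] args) {
        // Hypothetical node id; in the test above it comes from discoNodeWithReplica.getId().
        String nodeId = "replica-node-id";
        String leaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(nodeId);
        // A lease retaining everything from seq# 0 onward, built with the same constructor used elsewhere in these examples.
        RetentionLease lease = new RetentionLease(leaseId, 0L, System.currentTimeMillis(), "peer recovery");
        System.out.println(lease.id() + " retains operations from seq# " + lease.retainingSequenceNumber());
    }
}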

Example 2 with RetentionLease

Use of org.opensearch.index.seqno.RetentionLease in project OpenSearch by opensearch-project.

From the class SoftDeletesPolicy, the method getMinRetainedSeqNo:

/**
 * Returns the min seqno that is retained in the Lucene index.
 * Operations whose seq# is at least this value should exist in the Lucene index.
 */
synchronized long getMinRetainedSeqNo() {
    /*
     * When an engine is flushed, we need to provide it the latest collection of retention leases even when the soft
     * deletes policy is locked for peer recovery.
     */
    final RetentionLeases retentionLeases = retentionLeasesSupplier.get();
    // do not advance if the retention lock is held
    if (retentionLockCount == 0) {
        /*
         * This policy retains operations for two purposes: peer-recovery and querying changes history.
         *  - Peer-recovery is driven by the local checkpoint of the safe commit. In peer-recovery, the primary transfers
         *    a safe commit, then sends operations after the local checkpoint of that commit. This requires keeping all
         *    ops after localCheckpointOfSafeCommit.
         *  - Changes APIs are driven by a combination of the global checkpoint, retention operations, and retention
         *    leases. Here we prefer using the global checkpoint instead of the maximum sequence number because only
         *    operations up to the global checkpoint are exposed in the changes APIs.
         */
        // calculate the minimum sequence number to retain based on retention leases
        final long minimumRetainingSequenceNumber = retentionLeases.leases().stream().mapToLong(RetentionLease::retainingSequenceNumber).min().orElse(Long.MAX_VALUE);
        /*
         * The minimum sequence number to retain is the minimum of the minimum based on retention leases, and the number
         * of operations below the global checkpoint to retain (index.soft_deletes.retention.operations). The additional
         * increments on the global checkpoint and the local checkpoint of the safe commit are due to the fact that we
         * want to retain all operations above those checkpoints.
         */
        final long minSeqNoForQueryingChanges = Math.min(1 + globalCheckpointSupplier.getAsLong() - retentionOperations, minimumRetainingSequenceNumber);
        final long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, 1 + localCheckpointOfSafeCommit);
        /*
         * We take the maximum because minSeqNoToRetain can go backward: the retention operations value can be changed
         * in settings, and leases can be added with a retaining sequence number lower than previous retaining sequence
         * numbers.
         */
        minRetainedSeqNo = Math.max(minRetainedSeqNo, minSeqNoToRetain);
    }
    return minRetainedSeqNo;
}
Also used : RetentionLease(org.opensearch.index.seqno.RetentionLease) RetentionLeases(org.opensearch.index.seqno.RetentionLeases)
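
To make the formula above concrete, here is a worked computation with illustrative numbers (none of them taken from the source): the minimum retained seq# is the lower of the leases' minimum retaining seq# and the operation count kept below the global checkpoint, and never exceeds one past the local checkpoint of the safe commit.

public class MinRetainedSeqNoWorkedExample {
    public static void main(String[] args) {
        long globalCheckpoint = 120;
        long retentionOperations = 50;            // index.soft_deletes.retention.operations
        long localCheckpointOfSafeCommit = 90;
        long minimumRetainingSequenceNumber = 80; // smallest RetentionLease.retainingSequenceNumber()

        long minSeqNoForQueryingChanges =
            Math.min(1 + globalCheckpoint - retentionOperations, minimumRetainingSequenceNumber);      // min(71, 80) = 71
        long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, 1 + localCheckpointOfSafeCommit); // min(71, 91) = 71
        // The policy then applies Math.max(minRetainedSeqNo, minSeqNoToRetain) so the value never moves backward.
        System.out.println(minSeqNoToRetain); // 71
    }
}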

Example 3 with RetentionLease

Use of org.opensearch.index.seqno.RetentionLease in project OpenSearch by opensearch-project.

From the class RelocationIT, the method assertActiveCopiesEstablishedPeerRecoveryRetentionLeases:

private void assertActiveCopiesEstablishedPeerRecoveryRetentionLeases() throws Exception {
    assertBusy(() -> {
        for (ObjectCursor<String> it : client().admin().cluster().prepareState().get().getState().metadata().indices().keys()) {
            Map<ShardId, List<ShardStats>> byShardId = Stream.of(
                    client().admin().indices().prepareStats(it.value).get().getShards())
                .collect(Collectors.groupingBy(l -> l.getShardRouting().shardId()));
            for (List<ShardStats> shardStats : byShardId.values()) {
                Set<String> expectedLeaseIds = shardStats.stream()
                    .map(s -> ReplicationTracker.getPeerRecoveryRetentionLeaseId(s.getShardRouting()))
                    .collect(Collectors.toSet());
                for (ShardStats shardStat : shardStats) {
                    Set<String> actualLeaseIds = shardStat.getRetentionLeaseStats().retentionLeases().leases().stream()
                        .map(RetentionLease::id)
                        .collect(Collectors.toSet());
                    assertThat(expectedLeaseIds, everyItem(in(actualLeaseIds)));
                }
            }
        }
    });
}
Also used : ShardId(org.opensearch.index.shard.ShardId) ShardStats(org.opensearch.action.admin.indices.stats.ShardStats) RetentionLease(org.opensearch.index.seqno.RetentionLease) ReplicationTracker(org.opensearch.index.seqno.ReplicationTracker) ObjectCursor(com.carrotsearch.hppc.cursors.ObjectCursor) Map(java.util.Map) List(java.util.List) Set(java.util.Set) Stream(java.util.stream.Stream) Collectors(java.util.stream.Collectors) Matchers.everyItem(org.hamcrest.Matchers.everyItem) Matchers.in(org.hamcrest.Matchers.in)
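
The containment check above reduces to a small self-contained sketch: build a RetentionLeases container, collect the lease ids, and verify that every expected peer-recovery lease id is present. The lease ids, primary term, version, and source string below are hypothetical values chosen only for illustration.

import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;
import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;

public class LeaseContainmentSketch {
    public static void main(String[] args) {
        // Two hypothetical peer-recovery leases, one per shard copy.
        RetentionLeases retentionLeases = new RetentionLeases(1L, 1L, Arrays.asList(
            new RetentionLease("lease-for-node-a", 0L, 0L, "peer recovery"),
            new RetentionLease("lease-for-node-b", 0L, 0L, "peer recovery")));
        Set<String> actualLeaseIds = retentionLeases.leases().stream()
            .map(RetentionLease::id)
            .collect(Collectors.toSet());
        Set<String> expectedLeaseIds = Set.of("lease-for-node-a", "lease-for-node-b");
        // True once every active copy has established its peer-recovery retention lease.
        System.out.println(actualLeaseIds.containsAll(expectedLeaseIds));
    }
}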

Example 4 with RetentionLease

Use of org.opensearch.index.seqno.RetentionLease in project OpenSearch by opensearch-project.

From the class InternalEngineTests, the method testKeepMinRetainedSeqNoByMergePolicy:

public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException {
    IOUtils.close(engine, store);
    Settings.Builder settings = Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10));
    final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
    final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE);
    final AtomicLong retentionLeasesVersion = new AtomicLong();
    final AtomicReference<RetentionLeases> retentionLeasesHolder = new AtomicReference<>(new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()));
    final List<Engine.Operation> operations = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 10, 300, "2");
    Randomness.shuffle(operations);
    Set<Long> existingSeqNos = new HashSet<>();
    store = createStore();
    engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get, retentionLeasesHolder::get));
    assertThat(engine.getMinRetainedSeqNo(), equalTo(0L));
    long lastMinRetainedSeqNo = engine.getMinRetainedSeqNo();
    for (Engine.Operation op : operations) {
        final Engine.Result result;
        if (op instanceof Engine.Index) {
            result = engine.index((Engine.Index) op);
        } else {
            result = engine.delete((Engine.Delete) op);
        }
        existingSeqNos.add(result.getSeqNo());
        if (randomBoolean()) {
            // advance persisted local checkpoint
            engine.syncTranslog();
            assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint());
            globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getPersistedCheckpoint()));
        }
        if (randomBoolean()) {
            retentionLeasesVersion.incrementAndGet();
            final int length = randomIntBetween(0, 8);
            final List<RetentionLease> leases = new ArrayList<>(length);
            for (int i = 0; i < length; i++) {
                final String id = randomAlphaOfLength(8);
                final long retainingSequenceNumber = randomLongBetween(0, Math.max(0, globalCheckpoint.get()));
                final long timestamp = randomLongBetween(0L, Long.MAX_VALUE);
                final String source = randomAlphaOfLength(8);
                leases.add(new RetentionLease(id, retainingSequenceNumber, timestamp, source));
            }
            retentionLeasesHolder.set(new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), leases));
        }
        if (rarely()) {
            settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10));
            indexSettings.updateIndexMetadata(IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build());
            engine.onSettingsChanged(indexSettings.getTranslogRetentionAge(), indexSettings.getTranslogRetentionSize(), indexSettings.getSoftDeleteRetentionOperations());
        }
        if (rarely()) {
            engine.refresh("test");
        }
        if (rarely()) {
            engine.flush(true, true);
            assertThat(Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(Engine.MIN_RETAINED_SEQNO)), equalTo(engine.getMinRetainedSeqNo()));
        }
        if (rarely()) {
            engine.forceMerge(randomBoolean(), 1, false, false, false, UUIDs.randomBase64UUID());
        }
        try (Closeable ignored = engine.acquireHistoryRetentionLock()) {
            long minRetainSeqNos = engine.getMinRetainedSeqNo();
            assertThat(minRetainSeqNos, lessThanOrEqualTo(globalCheckpoint.get() + 1));
            Long[] expectedOps = existingSeqNos.stream().filter(seqno -> seqno >= minRetainSeqNos).toArray(Long[]::new);
            Set<Long> actualOps = readAllOperationsInLucene(engine, createMapperService("test")).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet());
            assertThat(actualOps, containsInAnyOrder(expectedOps));
        }
        try (Engine.IndexCommitRef commitRef = engine.acquireSafeIndexCommit()) {
            IndexCommit safeCommit = commitRef.getIndexCommit();
            if (safeCommit.getUserData().containsKey(Engine.MIN_RETAINED_SEQNO)) {
                lastMinRetainedSeqNo = Long.parseLong(safeCommit.getUserData().get(Engine.MIN_RETAINED_SEQNO));
            }
        }
    }
    if (randomBoolean()) {
        engine.close();
    } else {
        engine.flushAndClose();
    }
    try (InternalEngine recoveringEngine = new InternalEngine(engine.config())) {
        assertThat(recoveringEngine.getMinRetainedSeqNo(), equalTo(lastMinRetainedSeqNo));
    }
}
Also used : RetentionLease(org.opensearch.index.seqno.RetentionLease) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) SequenceNumbers(org.opensearch.index.seqno.SequenceNumbers) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) IndexSettings(org.opensearch.index.IndexSettings) IndexSettingsModule(org.opensearch.test.IndexSettingsModule) Settings(org.opensearch.common.settings.Settings) Translog(org.opensearch.index.translog.Translog) IndexCommit(org.apache.lucene.index.IndexCommit) Randomness(org.opensearch.common.Randomness) VersionType(org.opensearch.index.VersionType) IOUtils(org.opensearch.core.internal.io.IOUtils) UUIDs(org.opensearch.common.UUIDs) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet) Set(java.util.Set) Closeable(java.io.Closeable) Collectors(java.util.stream.Collectors) Matchers.equalTo(org.hamcrest.Matchers.equalTo) Matchers.lessThanOrEqualTo(org.hamcrest.Matchers.lessThanOrEqualTo) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder)
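
The test above hands the engine a Supplier<RetentionLeases> backed by an AtomicReference (retentionLeasesHolder::get) and publishes a fresh RetentionLeases instance with a bumped version whenever the leases change. A stripped-down sketch of that supplier pattern, with a hypothetical lease id and source string:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;

public class RetentionLeasesSupplierSketch {
    public static void main(String[] args) {
        long primaryTerm = 1L;
        AtomicLong retentionLeasesVersion = new AtomicLong();
        AtomicReference<RetentionLeases> holder = new AtomicReference<>(
            new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()));
        // Roughly what config(...) is handed above via retentionLeasesHolder::get.
        Supplier<RetentionLeases> retentionLeasesSupplier = holder::get;

        // Later: publish a new lease set under a higher version, as the test does inside its loop.
        List<RetentionLease> leases = new ArrayList<>();
        leases.add(new RetentionLease("lease-1", 42L, System.currentTimeMillis(), "test"));
        holder.set(new RetentionLeases(primaryTerm, retentionLeasesVersion.incrementAndGet(), leases));

        System.out.println(retentionLeasesSupplier.get().leases().size()); // 1
    }
}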

Example 5 with RetentionLease

Use of org.opensearch.index.seqno.RetentionLease in project OpenSearch by opensearch-project.

From the class SoftDeletesPolicyTests, the method testSoftDeletesRetentionLock:

/**
 * Makes sure we won't advance the retained seq# if the retention lock is held
 */
public void testSoftDeletesRetentionLock() {
    long retainedOps = between(0, 10000);
    AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED);
    final AtomicLong[] retainingSequenceNumbers = new AtomicLong[randomIntBetween(0, 8)];
    for (int i = 0; i < retainingSequenceNumbers.length; i++) {
        retainingSequenceNumbers[i] = new AtomicLong();
    }
    final Supplier<RetentionLeases> retentionLeasesSupplier = () -> {
        final List<RetentionLease> leases = new ArrayList<>(retainingSequenceNumbers.length);
        for (int i = 0; i < retainingSequenceNumbers.length; i++) {
            leases.add(new RetentionLease(Integer.toString(i), retainingSequenceNumbers[i].get(), 0L, "test"));
        }
        return new RetentionLeases(1, 1, leases);
    };
    long safeCommitCheckpoint = globalCheckpoint.get();
    SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps, retentionLeasesSupplier);
    long minRetainedSeqNo = policy.getMinRetainedSeqNo();
    List<Releasable> locks = new ArrayList<>();
    int iters = scaledRandomIntBetween(10, 1000);
    for (int i = 0; i < iters; i++) {
        if (randomBoolean()) {
            locks.add(policy.acquireRetentionLock());
        }
        // Advances the global checkpoint and the local checkpoint of a safe commit
        globalCheckpoint.addAndGet(between(0, 1000));
        for (final AtomicLong retainingSequenceNumber : retainingSequenceNumbers) {
            retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), Math.max(globalCheckpoint.get(), 0L)));
        }
        safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get());
        policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint);
        if (rarely()) {
            retainedOps = between(0, 10000);
            policy.setRetentionOperations(retainedOps);
        }
        // Release some locks
        List<Releasable> releasingLocks = randomSubsetOf(locks);
        locks.removeAll(releasingLocks);
        releasingLocks.forEach(Releasable::close);
        // getting the query has side effects, updating the internal state of the policy
        final Query query = policy.getRetentionQuery();
        assertThat(query, instanceOf(PointRangeQuery.class));
        final PointRangeQuery retentionQuery = (PointRangeQuery) query;
        // we only expose the minimum sequence number to the merge policy if the retention lock is not held
        if (locks.isEmpty()) {
            final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers).mapToLong(AtomicLong::get).min().orElse(Long.MAX_VALUE);
            long retainedSeqNo = Math.min(1 + safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps));
            minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
        }
        assertThat(retentionQuery.getNumDims(), equalTo(1));
        assertThat(LongPoint.decodeDimension(retentionQuery.getLowerPoint(), 0), equalTo(minRetainedSeqNo));
        assertThat(LongPoint.decodeDimension(retentionQuery.getUpperPoint(), 0), equalTo(Long.MAX_VALUE));
        assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
    }
    locks.forEach(Releasable::close);
    final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers).mapToLong(AtomicLong::get).min().orElse(Long.MAX_VALUE);
    long retainedSeqNo = Math.min(1 + safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps));
    minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
    assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
}
Also used : Query(org.apache.lucene.search.Query) PointRangeQuery(org.apache.lucene.search.PointRangeQuery) LongPoint(org.apache.lucene.document.LongPoint) RetentionLease(org.opensearch.index.seqno.RetentionLease) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArrayList(java.util.ArrayList) List(java.util.List) Releasable(org.opensearch.common.lease.Releasable)
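
Once all locks are released, the test recomputes the minimum it expects the policy to retain from the safe-commit checkpoint, the leases, and the retained-operations count. A worked version of that recomputation with illustrative numbers (not taken from the source):

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;

public class ExpectedMinRetainedSketch {
    public static void main(String[] args) {
        long retainedOps = 100;          // index.soft_deletes.retention.operations
        long globalCheckpoint = 500;
        long safeCommitCheckpoint = 450; // local checkpoint of the safe commit
        AtomicLong[] retainingSequenceNumbers = { new AtomicLong(300), new AtomicLong(420) };

        long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers)
            .mapToLong(AtomicLong::get)
            .min()
            .orElse(Long.MAX_VALUE);                                                       // 300
        long retainedSeqNo = Math.min(1 + safeCommitCheckpoint,
            Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint - retainedOps)); // min(451, min(300, 401)) = 300
        System.out.println(retainedSeqNo); // the seq# from which the policy is expected to retain operations
    }
}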

Aggregations

RetentionLease (org.opensearch.index.seqno.RetentionLease): 23 uses
ArrayList (java.util.ArrayList): 16 uses
RetentionLeases (org.opensearch.index.seqno.RetentionLeases): 11 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 9 uses
StoreFileMetadata (org.opensearch.index.store.StoreFileMetadata): 9 uses
IOException (java.io.IOException): 7 uses
List (java.util.List): 7 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 7 uses
Settings (org.opensearch.common.settings.Settings): 7 uses
IndexSettings (org.opensearch.index.IndexSettings): 7 uses
Arrays (java.util.Arrays): 6 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 6 uses
IndexShard (org.opensearch.index.shard.IndexShard): 6 uses
LongPoint (org.apache.lucene.document.LongPoint): 5 uses
CorruptIndexException (org.apache.lucene.index.CorruptIndexException): 5 uses
ActionListener (org.opensearch.action.ActionListener): 5 uses
TimeValue (org.opensearch.common.unit.TimeValue): 5 uses
ReplicationTracker (org.opensearch.index.seqno.ReplicationTracker): 5 uses
Store (org.opensearch.index.store.Store): 5 uses
Collections (java.util.Collections): 4 uses