
Example 11 with RetentionLeases

Use of org.opensearch.index.seqno.RetentionLeases in the opensearch-project/OpenSearch repository.

From class SoftDeletesPolicyTests, method testSoftDeletesRetentionLock:

/**
 * Makes sure we won't advance the retained seq# if the retention lock is held
 */
public void testSoftDeletesRetentionLock() {
    long retainedOps = between(0, 10000);
    AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED);
    final AtomicLong[] retainingSequenceNumbers = new AtomicLong[randomIntBetween(0, 8)];
    for (int i = 0; i < retainingSequenceNumbers.length; i++) {
        retainingSequenceNumbers[i] = new AtomicLong();
    }
    final Supplier<RetentionLeases> retentionLeasesSupplier = () -> {
        final List<RetentionLease> leases = new ArrayList<>(retainingSequenceNumbers.length);
        for (int i = 0; i < retainingSequenceNumbers.length; i++) {
            leases.add(new RetentionLease(Integer.toString(i), retainingSequenceNumbers[i].get(), 0L, "test"));
        }
        return new RetentionLeases(1, 1, leases);
    };
    long safeCommitCheckpoint = globalCheckpoint.get();
    SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps, retentionLeasesSupplier);
    long minRetainedSeqNo = policy.getMinRetainedSeqNo();
    List<Releasable> locks = new ArrayList<>();
    int iters = scaledRandomIntBetween(10, 1000);
    for (int i = 0; i < iters; i++) {
        if (randomBoolean()) {
            locks.add(policy.acquireRetentionLock());
        }
        // Advances the global checkpoint and the local checkpoint of a safe commit
        globalCheckpoint.addAndGet(between(0, 1000));
        for (final AtomicLong retainingSequenceNumber : retainingSequenceNumbers) {
            retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), Math.max(globalCheckpoint.get(), 0L)));
        }
        safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get());
        policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint);
        if (rarely()) {
            retainedOps = between(0, 10000);
            policy.setRetentionOperations(retainedOps);
        }
        // Release some locks
        List<Releasable> releasingLocks = randomSubsetOf(locks);
        locks.removeAll(releasingLocks);
        releasingLocks.forEach(Releasable::close);
        // getting the query has side effects, updating the internal state of the policy
        final Query query = policy.getRetentionQuery();
        assertThat(query, instanceOf(PointRangeQuery.class));
        final PointRangeQuery retentionQuery = (PointRangeQuery) query;
        // we only expose the minimum sequence number to the merge policy if the retention lock is not held
        if (locks.isEmpty()) {
            final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers).mapToLong(AtomicLong::get).min().orElse(Long.MAX_VALUE);
            long retainedSeqNo = Math.min(1 + safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps));
            minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
        }
        assertThat(retentionQuery.getNumDims(), equalTo(1));
        assertThat(LongPoint.decodeDimension(retentionQuery.getLowerPoint(), 0), equalTo(minRetainedSeqNo));
        assertThat(LongPoint.decodeDimension(retentionQuery.getUpperPoint(), 0), equalTo(Long.MAX_VALUE));
        assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
    }
    locks.forEach(Releasable::close);
    final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers).mapToLong(AtomicLong::get).min().orElse(Long.MAX_VALUE);
    long retainedSeqNo = Math.min(1 + safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps));
    minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
    assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
}
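
The assertions above all boil down to a single retention formula. Here is that invariant as a standalone helper, a minimal sketch only: the method name and parameters are illustrative and not part of the OpenSearch API.

/**
 * Hedged sketch of the invariant exercised by the test: the policy retains everything above
 * the safe commit, everything still held by the lowest retention lease, and the last
 * retainedOps operations below the global checkpoint, and the result never moves backwards.
 */
static long expectedMinRetainedSeqNo(
    long previousMinRetained,
    long safeCommitCheckpoint,
    long minLeaseRetainingSeqNo,
    long globalCheckpoint,
    long retainedOps
) {
    final long candidate = Math.min(
        1 + safeCommitCheckpoint,
        Math.min(minLeaseRetainingSeqNo, 1 + globalCheckpoint - retainedOps)
    );
    // the retained seq# is monotonic: it may only advance
    return Math.max(previousMinRetained, candidate);
}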
Also used : Query(org.apache.lucene.search.Query) PointRangeQuery(org.apache.lucene.search.PointRangeQuery) ArrayList(java.util.ArrayList) LongPoint(org.apache.lucene.document.LongPoint) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) AtomicLong(java.util.concurrent.atomic.AtomicLong) RetentionLease(org.opensearch.index.seqno.RetentionLease) PointRangeQuery(org.apache.lucene.search.PointRangeQuery) ArrayList(java.util.ArrayList) List(java.util.List) Releasable(org.opensearch.common.lease.Releasable)

Example 12 with RetentionLeases

Use of org.opensearch.index.seqno.RetentionLeases in the opensearch-project/OpenSearch repository.

From class RecoverySourceHandlerTests, method testSendOperationsConcurrently:

public void testSendOperationsConcurrently() throws Throwable {
    final IndexShard shard = mock(IndexShard.class);
    when(shard.state()).thenReturn(IndexShardState.STARTED);
    Set<Long> receivedSeqNos = ConcurrentCollections.newConcurrentSet();
    long maxSeenAutoIdTimestamp = randomBoolean() ? -1 : randomNonNegativeLong();
    long maxSeqNoOfUpdatesOrDeletes = randomBoolean() ? -1 : randomNonNegativeLong();
    RetentionLeases retentionLeases = new RetentionLeases(randomNonNegativeLong(), randomNonNegativeLong(), Collections.emptySet());
    long mappingVersion = randomNonNegativeLong();
    AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    int numOps = randomIntBetween(0, 1000);
    AtomicBoolean received = new AtomicBoolean();
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {

        @Override
        public void indexTranslogOperations(List<Translog.Operation> operations, int receivedTotalOps, long receivedMaxSeenAutoIdTimestamp, long receivedMaxSeqNoOfUpdatesOrDeletes, RetentionLeases receivedRetentionLease, long receivedMappingVersion, ActionListener<Long> listener) {
            received.set(true);
            assertThat(receivedMaxSeenAutoIdTimestamp, equalTo(maxSeenAutoIdTimestamp));
            assertThat(receivedMaxSeqNoOfUpdatesOrDeletes, equalTo(maxSeqNoOfUpdatesOrDeletes));
            assertThat(receivedRetentionLease, equalTo(retentionLeases));
            assertThat(receivedMappingVersion, equalTo(mappingVersion));
            assertThat(receivedTotalOps, equalTo(numOps));
            for (Translog.Operation operation : operations) {
                receivedSeqNos.add(operation.seqNo());
            }
            if (randomBoolean()) {
                localCheckpoint.addAndGet(randomIntBetween(1, 100));
            }
            listener.onResponse(localCheckpoint.get());
        }
    };
    PlainActionFuture<RecoverySourceHandler.SendSnapshotResult> sendFuture = new PlainActionFuture<>();
    long startingSeqNo = randomIntBetween(0, 1000);
    long endingSeqNo = startingSeqNo + randomIntBetween(0, 10000);
    List<Translog.Operation> operations = generateOperations(numOps);
    Randomness.shuffle(operations);
    List<Translog.Operation> skipOperations = randomSubsetOf(operations);
    Translog.Snapshot snapshot = newTranslogSnapshot(operations, skipOperations);
    RecoverySourceHandler handler = new RecoverySourceHandler(shard, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, getStartRecoveryRequest(), between(1, 10 * 1024), between(1, 5), between(1, 5));
    handler.phase2(startingSeqNo, endingSeqNo, snapshot, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes, retentionLeases, mappingVersion, sendFuture);
    RecoverySourceHandler.SendSnapshotResult sendSnapshotResult = sendFuture.actionGet();
    assertTrue(received.get());
    assertThat(sendSnapshotResult.targetLocalCheckpoint, equalTo(localCheckpoint.get()));
    assertThat(sendSnapshotResult.sentOperations, equalTo(receivedSeqNos.size()));
    Set<Long> sentSeqNos = new HashSet<>();
    for (Translog.Operation op : operations) {
        if (startingSeqNo <= op.seqNo() && op.seqNo() <= endingSeqNo && skipOperations.contains(op) == false) {
            sentSeqNos.add(op.seqNo());
        }
    }
    assertThat(receivedSeqNos, equalTo(sentSeqNos));
}
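
The verification loop at the end re-derives which operations phase2 should have shipped. The same predicate pulled out as a helper, a sketch only; the method name is an assumption and not part of RecoverySourceHandler:

static boolean expectedToBeSent(Translog.Operation op, long startingSeqNo, long endingSeqNo, List<Translog.Operation> skipOperations) {
    // an operation reaches the target only if its seq# lies in [startingSeqNo, endingSeqNo]
    // and the translog snapshot did not skip it
    return startingSeqNo <= op.seqNo() && op.seqNo() <= endingSeqNo && skipOperations.contains(op) == false;
}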
Also used : IndexShard(org.opensearch.index.shard.IndexShard) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) Translog(org.opensearch.index.translog.Translog) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicLong(java.util.concurrent.atomic.AtomicLong) LatchedActionListener(org.opensearch.action.LatchedActionListener) ActionListener(org.opensearch.action.ActionListener) PlainActionFuture(org.opensearch.action.support.PlainActionFuture) AtomicLong(java.util.concurrent.atomic.AtomicLong) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet)

Example 13 with RetentionLeases

Use of org.opensearch.index.seqno.RetentionLeases in the opensearch-project/OpenSearch repository.

From class IndexShardTests, method testDocStats:

public void testDocStats() throws Exception {
    IndexShard indexShard = null;
    try {
        indexShard = newStartedShard(false, Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0).build());
        // at least two documents so we have docs to delete
        final long numDocs = randomIntBetween(2, 32);
        final long numDocsToDelete = randomLongBetween(1, numDocs);
        for (int i = 0; i < numDocs; i++) {
            final String id = Integer.toString(i);
            indexDoc(indexShard, "_doc", id);
        }
        if (randomBoolean()) {
            indexShard.refresh("test");
        } else {
            indexShard.flush(new FlushRequest());
        }
        {
            IndexShard shard = indexShard;
            assertBusy(() -> {
                ThreadPool threadPool = shard.getThreadPool();
                assertThat(threadPool.relativeTimeInMillis(), greaterThan(shard.getLastSearcherAccess()));
            });
            long prevAccessTime = shard.getLastSearcherAccess();
            final DocsStats docsStats = indexShard.docStats();
            assertThat("searcher was marked as accessed", shard.getLastSearcherAccess(), equalTo(prevAccessTime));
            assertThat(docsStats.getCount(), equalTo(numDocs));
            try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
                assertTrue(searcher.getIndexReader().numDocs() <= docsStats.getCount());
            }
            assertThat(docsStats.getDeleted(), equalTo(0L));
            assertThat(docsStats.getAverageSizeInBytes(), greaterThan(0L));
        }
        final List<Integer> ids = randomSubsetOf(Math.toIntExact(numDocsToDelete), IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList()));
        for (final Integer i : ids) {
            final String id = Integer.toString(i);
            deleteDoc(indexShard, id);
            indexDoc(indexShard, "_doc", id);
        }
        // Need to update and sync the global checkpoint and the retention leases for the soft-deletes retention MergePolicy.
        final long newGlobalCheckpoint = indexShard.getLocalCheckpoint();
        if (indexShard.routingEntry().primary()) {
            indexShard.updateLocalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint());
            indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint());
            indexShard.syncRetentionLeases();
        } else {
            indexShard.updateGlobalCheckpointOnReplica(newGlobalCheckpoint, "test");
            final RetentionLeases retentionLeases = indexShard.getRetentionLeases();
            indexShard.updateRetentionLeasesOnReplica(new RetentionLeases(retentionLeases.primaryTerm(), retentionLeases.version() + 1, retentionLeases.leases().stream().map(lease -> new RetentionLease(lease.id(), newGlobalCheckpoint + 1, lease.timestamp(), ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE)).collect(Collectors.toList())));
        }
        indexShard.sync();
        // flush the buffered deletes
        final FlushRequest flushRequest = new FlushRequest();
        flushRequest.force(false);
        flushRequest.waitIfOngoing(false);
        indexShard.flush(flushRequest);
        if (randomBoolean()) {
            indexShard.refresh("test");
        }
        {
            final DocsStats docStats = indexShard.docStats();
            try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
                assertTrue(searcher.getIndexReader().numDocs() <= docStats.getCount());
            }
            assertThat(docStats.getCount(), equalTo(numDocs));
        }
        // merge them away
        final ForceMergeRequest forceMergeRequest = new ForceMergeRequest();
        forceMergeRequest.maxNumSegments(1);
        indexShard.forceMerge(forceMergeRequest);
        if (randomBoolean()) {
            indexShard.refresh("test");
        } else {
            indexShard.flush(new FlushRequest());
        }
        {
            final DocsStats docStats = indexShard.docStats();
            assertThat(docStats.getCount(), equalTo(numDocs));
            assertThat(docStats.getDeleted(), equalTo(0L));
            assertThat(docStats.getAverageSizeInBytes(), greaterThan(0L));
        }
    } finally {
        closeShards(indexShard);
    }
}
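
On the replica branch, the test advances the retention leases by hand so the soft-deletes retention MergePolicy is allowed to reclaim the deleted documents. The same transformation as an illustrative helper, mirroring the in-line lambda above; the method name is an assumption:

static RetentionLeases advanceLeases(RetentionLeases current, long newGlobalCheckpoint) {
    // bump the version and move every lease just past the new global checkpoint,
    // keeping the peer-recovery lease source used by the test
    return new RetentionLeases(
        current.primaryTerm(),
        current.version() + 1,
        current.leases()
            .stream()
            .map(
                lease -> new RetentionLease(
                    lease.id(),
                    newGlobalCheckpoint + 1,
                    lease.timestamp(),
                    ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE
                )
            )
            .collect(Collectors.toList())
    );
}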
Also used : Settings(org.opensearch.common.settings.Settings) IndexSettings(org.opensearch.index.IndexSettings) FlushRequest(org.opensearch.action.admin.indices.flush.FlushRequest) ForceMergeRequest(org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest) ThreadPool(org.opensearch.threadpool.ThreadPool) Engine(org.opensearch.index.engine.Engine) ReplicationTracker(org.opensearch.index.seqno.ReplicationTracker) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) RetentionLease(org.opensearch.index.seqno.RetentionLease) IntStream(java.util.stream.IntStream) Collectors(java.util.stream.Collectors) List(java.util.List)

Example 14 with RetentionLeases

Use of org.opensearch.index.seqno.RetentionLeases in the opensearch-project/OpenSearch repository.

From class IndexShardTests, method testTranslogRecoverySyncsTranslog:

public void testTranslogRecoverySyncsTranslog() throws IOException {
    Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build();
    IndexMetadata metadata = IndexMetadata.builder("test").putMapping("{ \"properties\": { \"foo\":  { \"type\": \"text\"}}}").settings(settings).primaryTerm(0, 1).build();
    IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null);
    recoverShardFromStore(primary);
    indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
    IndexShard replica = newShard(primary.shardId(), false, "n2", metadata, null);
    recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) {

        @Override
        public void indexTranslogOperations(final List<Translog.Operation> operations, final int totalTranslogOps, final long maxSeenAutoIdTimestamp, final long maxSeqNoOfUpdatesOrDeletes, final RetentionLeases retentionLeases, final long mappingVersion, final ActionListener<Long> listener) {
            super.indexTranslogOperations(operations, totalTranslogOps, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes, retentionLeases, mappingVersion, ActionListener.wrap(r -> {
                assertFalse(replica.isSyncNeeded());
                listener.onResponse(r);
            }, listener::onFailure));
        }
    }, true, true);
    closeShards(primary, replica);
}
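
The override above is a standard listener-wrapping pattern: run an extra assertion on every successful translog batch, then forward the result; failures pass straight through. A minimal sketch of that pattern (the helper name is assumed):

static ActionListener<Long> assertTranslogSynced(IndexShard replica, ActionListener<Long> delegate) {
    return ActionListener.wrap(localCheckpoint -> {
        // by the time a batch completes, the replica's translog must already be synced
        assertFalse(replica.isSyncNeeded());
        delegate.onResponse(localCheckpoint);
    }, delegate::onFailure);
}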
Also used : RecoveryTarget(org.opensearch.indices.recovery.RecoveryTarget) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) AtomicLong(java.util.concurrent.atomic.AtomicLong) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) IndexScopedSettings(org.opensearch.common.settings.IndexScopedSettings) Settings(org.opensearch.common.settings.Settings) IndexSettings(org.opensearch.index.IndexSettings)

Example 15 with RetentionLeases

Use of org.opensearch.index.seqno.RetentionLeases in the opensearch-project/OpenSearch repository.

From class IndexShardRetentionLeaseTests, method testPersistence:

public void testPersistence() throws IOException {
    final Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS).build();
    final IndexShard indexShard = newStartedShard(true, settings, new InternalEngineFactory());
    try {
        final int length = randomIntBetween(0, 8);
        final long[] minimumRetainingSequenceNumbers = new long[length];
        for (int i = 0; i < length; i++) {
            minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
            currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(randomNonNegativeLong()));
            indexShard.addRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {
            }));
        }
        currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(Long.MAX_VALUE));
        // force the retention leases to persist
        indexShard.persistRetentionLeases();
        // the written retention leases should equal our current retention leases
        final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
        final RetentionLeases writtenRetentionLeases = indexShard.loadRetentionLeases();
        assertThat(writtenRetentionLeases.version(), equalTo(1L + length));
        assertThat(writtenRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));
        // when we recover, we should recover the retention leases
        final IndexShard recoveredShard = reinitShard(indexShard, ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE));
        try {
            recoverShardFromStore(recoveredShard);
            final RetentionLeases recoveredRetentionLeases = recoveredShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(recoveredRetentionLeases.version(), equalTo(1L + length));
            assertThat(recoveredRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));
        } finally {
            closeShards(recoveredShard);
        }
        // we should not recover retention leases when force-allocating a stale primary
        final IndexShard forceRecoveredShard = reinitShard(indexShard, ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE));
        try {
            recoverShardFromStore(forceRecoveredShard);
            final RetentionLeases recoveredRetentionLeases = forceRecoveredShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(recoveredRetentionLeases.leases(), hasSize(1));
            assertThat(recoveredRetentionLeases.leases().iterator().next().id(), equalTo(ReplicationTracker.getPeerRecoveryRetentionLeaseId(indexShard.routingEntry())));
            assertThat(recoveredRetentionLeases.version(), equalTo(1L));
        } finally {
            closeShards(forceRecoveredShard);
        }
    } finally {
        closeShards(indexShard);
    }
}
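
The core of the test is the persist/reload round trip: whatever persistRetentionLeases() writes should be exactly what loadRetentionLeases() reads back, matching the leases the engine currently exposes. A condensed sketch of that contract (the helper name is assumed):

static void assertLeasesRoundTrip(IndexShard shard) throws IOException {
    // force the current retention leases to disk, then reload and compare with the engine's view
    shard.persistRetentionLeases();
    final RetentionLeases inMemory = shard.getEngine().config().retentionLeasesSupplier().get();
    final RetentionLeases onDisk = shard.loadRetentionLeases();
    assertThat(onDisk.version(), equalTo(inMemory.version()));
    assertThat(onDisk.leases(), contains(inMemory.leases().toArray(new RetentionLease[0])));
}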
Also used : InternalEngineFactory(org.opensearch.index.engine.InternalEngineFactory) Settings(org.opensearch.common.settings.Settings) IndexSettings(org.opensearch.index.IndexSettings) RetentionLeases(org.opensearch.index.seqno.RetentionLeases)

Aggregations

RetentionLeases (org.opensearch.index.seqno.RetentionLeases): 31
ArrayList (java.util.ArrayList): 16
IndexSettings (org.opensearch.index.IndexSettings): 16
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata): 14
AtomicLong (java.util.concurrent.atomic.AtomicLong): 13
Settings (org.opensearch.common.settings.Settings): 13
PlainActionFuture (org.opensearch.action.support.PlainActionFuture): 12
RetentionLease (org.opensearch.index.seqno.RetentionLease): 12
IndexShard (org.opensearch.index.shard.IndexShard): 12
List (java.util.List): 11
ActionListener (org.opensearch.action.ActionListener): 11
IOException (java.io.IOException): 10
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 10
CountDownLatch (java.util.concurrent.CountDownLatch): 9
IndexCommit (org.apache.lucene.index.IndexCommit): 9
ShardRouting (org.opensearch.cluster.routing.ShardRouting): 9
GatedCloseable (org.opensearch.common.concurrent.GatedCloseable): 8
ReplicationTracker (org.opensearch.index.seqno.ReplicationTracker): 8
Translog (org.opensearch.index.translog.Translog): 8
Arrays (java.util.Arrays): 7