Use of org.opensearch.index.seqno.RetentionLeases in project OpenSearch by opensearch-project.
Class SoftDeletesPolicyTests, method testSoftDeletesRetentionLock.
/**
 * Makes sure we won't advance the retained seq# if the retention lock is held
 */
public void testSoftDeletesRetentionLock() {
    long retainedOps = between(0, 10000);
    AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED);
    final AtomicLong[] retainingSequenceNumbers = new AtomicLong[randomIntBetween(0, 8)];
    for (int i = 0; i < retainingSequenceNumbers.length; i++) {
        retainingSequenceNumbers[i] = new AtomicLong();
    }
    final Supplier<RetentionLeases> retentionLeasesSupplier = () -> {
        final List<RetentionLease> leases = new ArrayList<>(retainingSequenceNumbers.length);
        for (int i = 0; i < retainingSequenceNumbers.length; i++) {
            leases.add(new RetentionLease(Integer.toString(i), retainingSequenceNumbers[i].get(), 0L, "test"));
        }
        return new RetentionLeases(1, 1, leases);
    };
    long safeCommitCheckpoint = globalCheckpoint.get();
    SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps, retentionLeasesSupplier);
    long minRetainedSeqNo = policy.getMinRetainedSeqNo();
    List<Releasable> locks = new ArrayList<>();
    int iters = scaledRandomIntBetween(10, 1000);
    for (int i = 0; i < iters; i++) {
        if (randomBoolean()) {
            locks.add(policy.acquireRetentionLock());
        }
        // Advances the global checkpoint and the local checkpoint of a safe commit
        globalCheckpoint.addAndGet(between(0, 1000));
        for (final AtomicLong retainingSequenceNumber : retainingSequenceNumbers) {
            retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), Math.max(globalCheckpoint.get(), 0L)));
        }
        safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get());
        policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint);
        if (rarely()) {
            retainedOps = between(0, 10000);
            policy.setRetentionOperations(retainedOps);
        }
        // Release some locks
        List<Releasable> releasingLocks = randomSubsetOf(locks);
        locks.removeAll(releasingLocks);
        releasingLocks.forEach(Releasable::close);
        // getting the query has side effects, updating the internal state of the policy
        final Query query = policy.getRetentionQuery();
        assertThat(query, instanceOf(PointRangeQuery.class));
        final PointRangeQuery retentionQuery = (PointRangeQuery) query;
        // we only expose the minimum sequence number to the merge policy if the retention lock is not held
        if (locks.isEmpty()) {
            final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers)
                .mapToLong(AtomicLong::get)
                .min()
                .orElse(Long.MAX_VALUE);
            long retainedSeqNo = Math.min(
                1 + safeCommitCheckpoint,
                Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps)
            );
            minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
        }
        assertThat(retentionQuery.getNumDims(), equalTo(1));
        assertThat(LongPoint.decodeDimension(retentionQuery.getLowerPoint(), 0), equalTo(minRetainedSeqNo));
        assertThat(LongPoint.decodeDimension(retentionQuery.getUpperPoint(), 0), equalTo(Long.MAX_VALUE));
        assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
    }
    locks.forEach(Releasable::close);
    final long minimumRetainingSequenceNumber = Arrays.stream(retainingSequenceNumbers)
        .mapToLong(AtomicLong::get)
        .min()
        .orElse(Long.MAX_VALUE);
    long retainedSeqNo = Math.min(
        1 + safeCommitCheckpoint,
        Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps)
    );
    minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo);
    assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo));
}
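Note how the test wires SoftDeletesPolicy to its leases: the policy holds a Supplier<RetentionLeases> and pulls a fresh snapshot on every evaluation instead of caching the collection. Below is a minimal standalone sketch of that supplier pattern, including the Long.MAX_VALUE fallback the test relies on when no leases exist. The class name, the lease ids, and the use of retainingSequenceNumber() as the accessor are illustrative assumptions; it presumes the OpenSearch server artifact on the classpath.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;

public class RetentionLeasesSupplierSketch {
    public static void main(String[] args) {
        // Two leases retaining operations from seq# 42 and 17 respectively;
        // constructor arguments are (id, retainingSequenceNumber, timestamp, source).
        final List<RetentionLease> leases = new ArrayList<>();
        leases.add(new RetentionLease("replica-a", 42L, 0L, "test"));
        leases.add(new RetentionLease("replica-b", 17L, 0L, "test"));

        // primaryTerm = 1, version = 1, matching the test above.
        Supplier<RetentionLeases> supplier = () -> new RetentionLeases(1, 1, leases);

        // The minimum retaining seq# across all leases caps how far the
        // soft-deletes policy may advance its retained seq#; with no leases
        // the cap falls back to Long.MAX_VALUE, i.e. no cap at all.
        long minRetaining = supplier.get()
            .leases()
            .stream()
            .mapToLong(RetentionLease::retainingSequenceNumber) // assumed accessor name
            .min()
            .orElse(Long.MAX_VALUE);
        System.out.println("minimum retaining seq# = " + minRetaining); // 17
    }
}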
Use of org.opensearch.index.seqno.RetentionLeases in project OpenSearch by opensearch-project.
Class RecoverySourceHandlerTests, method testSendOperationsConcurrently.
public void testSendOperationsConcurrently() throws Throwable {
    final IndexShard shard = mock(IndexShard.class);
    when(shard.state()).thenReturn(IndexShardState.STARTED);
    Set<Long> receivedSeqNos = ConcurrentCollections.newConcurrentSet();
    long maxSeenAutoIdTimestamp = randomBoolean() ? -1 : randomNonNegativeLong();
    long maxSeqNoOfUpdatesOrDeletes = randomBoolean() ? -1 : randomNonNegativeLong();
    RetentionLeases retentionLeases = new RetentionLeases(randomNonNegativeLong(), randomNonNegativeLong(), Collections.emptySet());
    long mappingVersion = randomNonNegativeLong();
    AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    int numOps = randomIntBetween(0, 1000);
    AtomicBoolean received = new AtomicBoolean();
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
        @Override
        public void indexTranslogOperations(
            List<Translog.Operation> operations,
            int receivedTotalOps,
            long receivedMaxSeenAutoIdTimestamp,
            long receivedMaxSeqNoOfUpdatesOrDeletes,
            RetentionLeases receivedRetentionLease,
            long receivedMappingVersion,
            ActionListener<Long> listener
        ) {
            received.set(true);
            assertThat(receivedMaxSeenAutoIdTimestamp, equalTo(maxSeenAutoIdTimestamp));
            assertThat(receivedMaxSeqNoOfUpdatesOrDeletes, equalTo(maxSeqNoOfUpdatesOrDeletes));
            assertThat(receivedRetentionLease, equalTo(retentionLeases));
            assertThat(receivedMappingVersion, equalTo(mappingVersion));
            assertThat(receivedTotalOps, equalTo(numOps));
            for (Translog.Operation operation : operations) {
                receivedSeqNos.add(operation.seqNo());
            }
            if (randomBoolean()) {
                localCheckpoint.addAndGet(randomIntBetween(1, 100));
            }
            listener.onResponse(localCheckpoint.get());
        }
    };
    PlainActionFuture<RecoverySourceHandler.SendSnapshotResult> sendFuture = new PlainActionFuture<>();
    long startingSeqNo = randomIntBetween(0, 1000);
    long endingSeqNo = startingSeqNo + randomIntBetween(0, 10000);
    List<Translog.Operation> operations = generateOperations(numOps);
    Randomness.shuffle(operations);
    List<Translog.Operation> skipOperations = randomSubsetOf(operations);
    Translog.Snapshot snapshot = newTranslogSnapshot(operations, skipOperations);
    RecoverySourceHandler handler = new RecoverySourceHandler(
        shard,
        new AsyncRecoveryTarget(target, recoveryExecutor),
        threadPool,
        getStartRecoveryRequest(),
        between(1, 10 * 1024),
        between(1, 5),
        between(1, 5)
    );
    handler.phase2(startingSeqNo, endingSeqNo, snapshot, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes, retentionLeases, mappingVersion, sendFuture);
    RecoverySourceHandler.SendSnapshotResult sendSnapshotResult = sendFuture.actionGet();
    assertTrue(received.get());
    assertThat(sendSnapshotResult.targetLocalCheckpoint, equalTo(localCheckpoint.get()));
    assertThat(sendSnapshotResult.sentOperations, equalTo(receivedSeqNos.size()));
    Set<Long> sentSeqNos = new HashSet<>();
    for (Translog.Operation op : operations) {
        if (startingSeqNo <= op.seqNo() && op.seqNo() <= endingSeqNo && skipOperations.contains(op) == false) {
            sentSeqNos.add(op.seqNo());
        }
    }
    assertThat(receivedSeqNos, equalTo(sentSeqNos));
}
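The loop at the end of the test spells out the contract phase2 is being verified against: an operation is sent exactly when its seq# lies in the inclusive range [startingSeqNo, endingSeqNo] and the snapshot did not skip it. A self-contained sketch of that predicate, with a hypothetical Op class standing in for Translog.Operation:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class Phase2FilterSketch {

    // Hypothetical stand-in for Translog.Operation; only the seq# matters here.
    static final class Op {
        final long seqNo;
        Op(long seqNo) { this.seqNo = seqNo; }
    }

    // Mirrors the expectation checked at the end of the test: an operation is
    // sent iff its seq# falls inside [startingSeqNo, endingSeqNo] and it was
    // not skipped by the snapshot.
    static Set<Long> expectedSentSeqNos(List<Op> ops, Set<Op> skipped, long startingSeqNo, long endingSeqNo) {
        return ops.stream()
            .filter(op -> startingSeqNo <= op.seqNo && op.seqNo <= endingSeqNo)
            .filter(op -> skipped.contains(op) == false)
            .map(op -> op.seqNo)
            .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        List<Op> ops = Arrays.asList(new Op(1), new Op(3), new Op(5), new Op(9));
        Set<Op> skipped = Collections.singleton(ops.get(1)); // skip seq# 3
        System.out.println(expectedSentSeqNos(ops, skipped, 2, 8)); // [5]
    }
}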
Use of org.opensearch.index.seqno.RetentionLeases in project OpenSearch by opensearch-project.
Class IndexShardTests, method testDocStats.
public void testDocStats() throws Exception {
    IndexShard indexShard = null;
    try {
        indexShard = newStartedShard(
            false,
            Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0).build()
        );
        // at least two documents so we have docs to delete
        final long numDocs = randomIntBetween(2, 32);
        final long numDocsToDelete = randomLongBetween(1, numDocs);
        for (int i = 0; i < numDocs; i++) {
            final String id = Integer.toString(i);
            indexDoc(indexShard, "_doc", id);
        }
        if (randomBoolean()) {
            indexShard.refresh("test");
        } else {
            indexShard.flush(new FlushRequest());
        }
        {
            IndexShard shard = indexShard;
            assertBusy(() -> {
                ThreadPool threadPool = shard.getThreadPool();
                assertThat(threadPool.relativeTimeInMillis(), greaterThan(shard.getLastSearcherAccess()));
            });
            long prevAccessTime = shard.getLastSearcherAccess();
            final DocsStats docsStats = indexShard.docStats();
            assertThat("searcher was marked as accessed", shard.getLastSearcherAccess(), equalTo(prevAccessTime));
            assertThat(docsStats.getCount(), equalTo(numDocs));
            try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
                assertTrue(searcher.getIndexReader().numDocs() <= docsStats.getCount());
            }
            assertThat(docsStats.getDeleted(), equalTo(0L));
            assertThat(docsStats.getAverageSizeInBytes(), greaterThan(0L));
        }
        final List<Integer> ids = randomSubsetOf(
            Math.toIntExact(numDocsToDelete),
            IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList())
        );
        for (final Integer i : ids) {
            final String id = Integer.toString(i);
            deleteDoc(indexShard, id);
            indexDoc(indexShard, "_doc", id);
        }
        // Need to update and sync the global checkpoint and the retention leases for the soft-deletes retention MergePolicy.
        final long newGlobalCheckpoint = indexShard.getLocalCheckpoint();
        if (indexShard.routingEntry().primary()) {
            indexShard.updateLocalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint());
            indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint());
            indexShard.syncRetentionLeases();
        } else {
            indexShard.updateGlobalCheckpointOnReplica(newGlobalCheckpoint, "test");
            final RetentionLeases retentionLeases = indexShard.getRetentionLeases();
            indexShard.updateRetentionLeasesOnReplica(
                new RetentionLeases(
                    retentionLeases.primaryTerm(),
                    retentionLeases.version() + 1,
                    retentionLeases.leases()
                        .stream()
                        .map(
                            lease -> new RetentionLease(
                                lease.id(),
                                newGlobalCheckpoint + 1,
                                lease.timestamp(),
                                ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE
                            )
                        )
                        .collect(Collectors.toList())
                )
            );
        }
        indexShard.sync();
        // flush the buffered deletes
        final FlushRequest flushRequest = new FlushRequest();
        flushRequest.force(false);
        flushRequest.waitIfOngoing(false);
        indexShard.flush(flushRequest);
        if (randomBoolean()) {
            indexShard.refresh("test");
        }
        {
            final DocsStats docStats = indexShard.docStats();
            try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
                assertTrue(searcher.getIndexReader().numDocs() <= docStats.getCount());
            }
            assertThat(docStats.getCount(), equalTo(numDocs));
        }
        // merge them away
        final ForceMergeRequest forceMergeRequest = new ForceMergeRequest();
        forceMergeRequest.maxNumSegments(1);
        indexShard.forceMerge(forceMergeRequest);
        if (randomBoolean()) {
            indexShard.refresh("test");
        } else {
            indexShard.flush(new FlushRequest());
        }
        {
            final DocsStats docStats = indexShard.docStats();
            assertThat(docStats.getCount(), equalTo(numDocs));
            assertThat(docStats.getDeleted(), equalTo(0L));
            assertThat(docStats.getAverageSizeInBytes(), greaterThan(0L));
        }
    } finally {
        closeShards(indexShard);
    }
}
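The replica branch above illustrates the idiom for moving leases forward without going through the primary: RetentionLeases is immutable, so an update is expressed by rebuilding the collection with the version bumped by one and each lease re-created at the new retaining seq#. A hedged sketch of that copy-and-bump step; the helper name advanceLeases is invented for illustration, and all constructors and accessors used are the ones that appear in the tests above.

import java.util.stream.Collectors;

import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;

public final class AdvanceLeasesSketch {

    // Rebuild the lease collection with version + 1 and every lease moved to
    // retain from just past the new global checkpoint, so the soft-deletes
    // retention MergePolicy is allowed to reclaim the deleted documents below it.
    static RetentionLeases advanceLeases(RetentionLeases current, long newGlobalCheckpoint, String source) {
        return new RetentionLeases(
            current.primaryTerm(),
            current.version() + 1,
            current.leases()
                .stream()
                .map(lease -> new RetentionLease(lease.id(), newGlobalCheckpoint + 1, lease.timestamp(), source))
                .collect(Collectors.toList())
        );
    }
}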
Use of org.opensearch.index.seqno.RetentionLeases in project OpenSearch by opensearch-project.
Class IndexShardTests, method testTranslogRecoverySyncsTranslog.
public void testTranslogRecoverySyncsTranslog() throws IOException {
    Settings settings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    IndexMetadata metadata = IndexMetadata.builder("test")
        .putMapping("{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
        .settings(settings)
        .primaryTerm(0, 1)
        .build();
    IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null);
    recoverShardFromStore(primary);
    indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}");
    IndexShard replica = newShard(primary.shardId(), false, "n2", metadata, null);
    recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener) {
        @Override
        public void indexTranslogOperations(
            final List<Translog.Operation> operations,
            final int totalTranslogOps,
            final long maxSeenAutoIdTimestamp,
            final long maxSeqNoOfUpdatesOrDeletes,
            final RetentionLeases retentionLeases,
            final long mappingVersion,
            final ActionListener<Long> listener
        ) {
            super.indexTranslogOperations(
                operations,
                totalTranslogOps,
                maxSeenAutoIdTimestamp,
                maxSeqNoOfUpdatesOrDeletes,
                retentionLeases,
                mappingVersion,
                ActionListener.wrap(r -> {
                    assertFalse(replica.isSyncNeeded());
                    listener.onResponse(r);
                }, listener::onFailure)
            );
        }
    }, true, true);
    closeShards(primary, replica);
}
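The only moving part in this test is the listener wrapping: the override checks that the replica's translog no longer needs a sync before the original listener sees the response. A minimal sketch of that wrap-and-assert pattern in isolation; assertingListener and the BooleanSupplier argument are invented names, and the import path of ActionListener differs across OpenSearch versions.

import java.util.function.BooleanSupplier;

import org.opensearch.action.ActionListener; // org.opensearch.core.action.ActionListener in newer versions

public final class WrapListenerSketch {

    // Run a check on success before delegating to the original listener;
    // failures are forwarded to the original listener untouched.
    static ActionListener<Long> assertingListener(ActionListener<Long> original, BooleanSupplier syncNeeded) {
        return ActionListener.wrap(r -> {
            if (syncNeeded.getAsBoolean()) {
                throw new AssertionError("translog must be synced before the listener is notified");
            }
            original.onResponse(r);
        }, original::onFailure);
    }
}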
Use of org.opensearch.index.seqno.RetentionLeases in project OpenSearch by opensearch-project.
Class IndexShardRetentionLeaseTests, method testPersistence.
public void testPersistence() throws IOException {
    final Settings settings = Settings.builder()
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS)
        .build();
    final IndexShard indexShard = newStartedShard(true, settings, new InternalEngineFactory());
    try {
        final int length = randomIntBetween(0, 8);
        final long[] minimumRetainingSequenceNumbers = new long[length];
        for (int i = 0; i < length; i++) {
            minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE);
            currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(randomNonNegativeLong()));
            indexShard.addRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {}));
        }
        currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(Long.MAX_VALUE));
        // force the retention leases to persist
        indexShard.persistRetentionLeases();
        // the written retention leases should equal our current retention leases
        final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get();
        final RetentionLeases writtenRetentionLeases = indexShard.loadRetentionLeases();
        assertThat(writtenRetentionLeases.version(), equalTo(1L + length));
        assertThat(writtenRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));
        // when we recover, we should recover the retention leases
        final IndexShard recoveredShard = reinitShard(
            indexShard,
            ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE)
        );
        try {
            recoverShardFromStore(recoveredShard);
            final RetentionLeases recoveredRetentionLeases = recoveredShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(recoveredRetentionLeases.version(), equalTo(1L + length));
            assertThat(recoveredRetentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0])));
        } finally {
            closeShards(recoveredShard);
        }
        // we should not recover retention leases when force-allocating a stale primary
        final IndexShard forceRecoveredShard = reinitShard(
            indexShard,
            ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)
        );
        try {
            recoverShardFromStore(forceRecoveredShard);
            final RetentionLeases recoveredRetentionLeases = forceRecoveredShard.getEngine().config().retentionLeasesSupplier().get();
            assertThat(recoveredRetentionLeases.leases(), hasSize(1));
            assertThat(
                recoveredRetentionLeases.leases().iterator().next().id(),
                equalTo(ReplicationTracker.getPeerRecoveryRetentionLeaseId(indexShard.routingEntry()))
            );
            assertThat(recoveredRetentionLeases.version(), equalTo(1L));
        } finally {
            closeShards(forceRecoveredShard);
        }
    } finally {
        closeShards(indexShard);
    }
}
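A detail of this round-trip that is easy to miss: RetentionLeases is immutable and carries a version that grows with every mutation, so the test expects 1L + length after adding length leases (the extra one is apparently the peer recovery retention lease a started primary establishes, as the force-stale branch suggests), and persistence must reproduce (primaryTerm, version, leases) exactly. A small standalone sketch of that versioning contract, under the same constructor assumptions as above:

import java.util.Collections;

import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;

public final class LeaseVersioningSketch {
    public static void main(String[] args) {
        RetentionLease lease = new RetentionLease("r1", 7L, 0L, "test");
        // Every published change to a shard's leases is a new immutable
        // RetentionLeases carrying a strictly higher version; loading the
        // persisted copy back must yield the same version and leases, which
        // is what the loadRetentionLeases() assertions above verify.
        RetentionLeases before = new RetentionLeases(1, 1, Collections.singletonList(lease));
        RetentionLeases after = new RetentionLeases(1, before.version() + 1, Collections.singletonList(lease));
        System.out.println(after.version() - before.version()); // 1
    }
}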