use of org.apache.lucene.index.IndexCommit in project crate by crate.
the class InternalEngineTests method testConcurrentWritesAndCommits.
// this test writes documents to the engine while concurrently flushing/committing,
// and ensures that the commit points contain the correct sequence number data
@Test
public void testConcurrentWritesAndCommits() throws Exception {
List<Engine.IndexCommitRef> commits = new ArrayList<>();
try (Store store = createStore();
InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) {
final int numIndexingThreads = scaledRandomIntBetween(2, 4);
final int numDocsPerThread = randomIntBetween(500, 1000);
final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
final List<Thread> indexingThreads = new ArrayList<>();
final CountDownLatch doneLatch = new CountDownLatch(numIndexingThreads);
// create N indexing threads to index documents simultaneously
for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
final int threadIdx = threadNum;
Thread indexingThread = new Thread(() -> {
try {
// wait for all threads to start at the same time
barrier.await();
// index random number of docs
for (int i = 0; i < numDocsPerThread; i++) {
final String id = "thread" + threadIdx + "#" + i;
ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null);
engine.index(indexForDoc(doc));
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
doneLatch.countDown();
}
});
indexingThreads.add(indexingThread);
}
// start the indexing threads
for (Thread thread : indexingThreads) {
thread.start();
}
// wait for indexing threads to all be ready to start
barrier.await();
int commitLimit = randomIntBetween(10, 20);
long sleepTime = 1;
// create random commit points
boolean doneIndexing;
do {
doneIndexing = doneLatch.await(sleepTime, TimeUnit.MILLISECONDS);
commits.add(engine.acquireLastIndexCommit(true));
if (commits.size() > commitLimit) {
// don't keep on piling up too many commits
IOUtils.close(commits.remove(randomIntBetween(0, commits.size() - 1)));
// increase the wait time so that, if things are slow, we eventually wait long enough for the indexing threads to finish.
// this reduces pressure on the disks and lets the indexing threads make progress without piling up too many commits
sleepTime = sleepTime * 2;
}
} while (doneIndexing == false);
// now, verify all the commits have the correct docs according to the user commit data
long prevLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
long prevMaxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
for (Engine.IndexCommitRef commitRef : commits) {
final IndexCommit commit = commitRef.getIndexCommit();
Map<String, String> userData = commit.getUserData();
long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ? Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) : SequenceNumbers.NO_OPS_PERFORMED;
long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO) ? Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)) : UNASSIGNED_SEQ_NO;
// local checkpoint and max seq no shouldn't go backwards
assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint));
assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo));
try (IndexReader reader = DirectoryReader.open(commit)) {
Long highest = getHighestSeqNo(reader);
final long highestSeqNo;
if (highest != null) {
highestSeqNo = highest.longValue();
} else {
highestSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
}
// make sure localCheckpoint <= highest seq no found <= maxSeqNo
assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint));
assertThat(highestSeqNo, lessThanOrEqualTo(maxSeqNo));
// make sure all sequence numbers up to and including the local checkpoint are in the index
FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo);
for (int i = 0; i <= localCheckpoint; i++) {
assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed", seqNosBitSet.get(i));
}
}
prevLocalCheckpoint = localCheckpoint;
prevMaxSeqNo = maxSeqNo;
}
}
}
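The test above asserts that each commit point's user data records the local checkpoint and max sequence number, and that a reader opened on a commit sees every operation up to the local checkpoint. For reference, here is a minimal, self-contained sketch of the plain-Lucene mechanics behind that, independent of the Crate engine: attach user data to a commit, list the commits, and open a reader on one of them. The key and field names (local_checkpoint, max_seq_no, _seq_no) are illustrative stand-ins, and the sketch assumes a recent Lucene (8.x or later) on the classpath.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import java.util.List;
import java.util.Map;

public class CommitUserDataExample {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (int seqNo = 0; seqNo < 3; seqNo++) {
                Document doc = new Document();
                doc.add(new StringField("id", "doc-" + seqNo, Field.Store.YES));
                doc.add(new NumericDocValuesField("_seq_no", seqNo)); // illustrative field name
                writer.addDocument(doc);
            }
            // Attach user data to the next commit, analogous to the engine storing
            // local_checkpoint / max_seq_no in the commit point.
            writer.setLiveCommitData(Map.of("local_checkpoint", "2", "max_seq_no", "2").entrySet());
            writer.commit();

            List<IndexCommit> commits = DirectoryReader.listCommits(dir);
            IndexCommit lastCommit = commits.get(commits.size() - 1);
            System.out.println("commit user data: " + lastCommit.getUserData());

            // A reader opened on the commit sees exactly the documents in that commit point.
            try (DirectoryReader reader = DirectoryReader.open(lastCommit)) {
                System.out.println("docs in commit: " + reader.numDocs());
            }
        }
    }
}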
use of org.apache.lucene.index.IndexCommit in project crate by crate.
the class CombinedDeletionPolicyTests method testCheckUnreferencedCommits.
public void testCheckUnreferencedCommits() throws Exception {
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0, () -> RetentionLeases.EMPTY);
final UUID translogUUID = UUID.randomUUID();
final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy();
CombinedDeletionPolicy indexPolicy = newCombinedDeletionPolicy(translogPolicy, softDeletesPolicy, globalCheckpoint);
final List<IndexCommit> commitList = new ArrayList<>();
int totalCommits = between(2, 20);
long lastMaxSeqNo = between(1, 1000);
long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo);
for (int i = 0; i < totalCommits; i++) {
lastMaxSeqNo += between(1, 10000);
lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo);
commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID));
}
int safeCommitIndex = randomIntBetween(0, commitList.size() - 1);
globalCheckpoint.set(Long.parseLong(commitList.get(safeCommitIndex).getUserData().get(SequenceNumbers.MAX_SEQ_NO)));
commitList.forEach(this::resetDeletion);
indexPolicy.onCommit(commitList);
if (safeCommitIndex == commitList.size() - 1) {
// Safe commit is the last commit - no need to clean up
assertThat(translogPolicy.getLocalCheckpointOfSafeCommit(), equalTo(getLocalCheckpoint(commitList.get(commitList.size() - 1))));
assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false));
} else {
// Advance the global checkpoint, but not enough for any commit after the safe commit to become safe
IndexCommit nextSafeCommit = commitList.get(safeCommitIndex + 1);
globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), Long.parseLong(nextSafeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)) - 1));
assertFalse(indexPolicy.hasUnreferencedCommits());
// Advance the global checkpoint enough for a later commit to become safe
globalCheckpoint.set(randomLongBetween(Long.parseLong(nextSafeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)), lastMaxSeqNo));
assertTrue(indexPolicy.hasUnreferencedCommits());
// Advance the global checkpoint enough for the last commit to become safe
globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE));
commitList.forEach(this::resetDeletion);
indexPolicy.onCommit(commitList);
// Safe commit is the last commit - no need to clean up
assertThat(translogPolicy.getLocalCheckpointOfSafeCommit(), equalTo(getLocalCheckpoint(commitList.get(commitList.size() - 1))));
assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false));
}
}
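The CombinedDeletionPolicy tests in this section operate on commits produced by a mockIndexCommit helper whose definition is not included here. As a rough guess at its shape, a mocked IndexCommit mainly needs to answer getUserData() with the sequence-number and translog keys the policy reads; the real helper presumably also wires up delete()/isDeleted() so that resetDeletion and the deletion assertions work. A hedged sketch, assuming Mockito and illustrative key names:
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.lucene.index.IndexCommit;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

public class MockCommitSketch {

    // Illustrative stand-in for the tests' mockIndexCommit helper: a mocked IndexCommit
    // whose user data carries checkpoint, max-seq-no, and translog-UUID entries.
    static IndexCommit mockIndexCommit(long localCheckpoint, long maxSeqNo, UUID translogUUID) throws IOException {
        Map<String, String> userData = new HashMap<>();
        userData.put("local_checkpoint", Long.toString(localCheckpoint)); // assumed key names
        userData.put("max_seq_no", Long.toString(maxSeqNo));
        userData.put("translog_uuid", translogUUID.toString());
        IndexCommit commit = mock(IndexCommit.class);
        when(commit.getUserData()).thenReturn(userData);
        return commit;
    }
}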
use of org.apache.lucene.index.IndexCommit in project crate by crate.
the class CombinedDeletionPolicyTests method testAcquireIndexCommit.
public void testAcquireIndexCommit() throws Exception {
final AtomicLong globalCheckpoint = new AtomicLong();
final int extraRetainedOps = between(0, 100);
final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps, () -> RetentionLeases.EMPTY);
final UUID translogUUID = UUID.randomUUID();
TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy();
CombinedDeletionPolicy indexPolicy = newCombinedDeletionPolicy(translogPolicy, softDeletesPolicy, globalCheckpoint);
long lastMaxSeqNo = between(1, 1000);
long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo);
int safeIndex = 0;
List<IndexCommit> commitList = new ArrayList<>();
List<IndexCommit> snapshottingCommits = new ArrayList<>();
final int iters = between(10, 100);
for (int i = 0; i < iters; i++) {
int newCommits = between(1, 10);
for (int n = 0; n < newCommits; n++) {
lastMaxSeqNo += between(1, 1000);
lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo);
commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID));
}
// Advance the global checkpoint into [maxSeqNo(safeIndex), maxSeqNo(safeIndex + 1)) so that the commit at safeIndex stays the safe commit
safeIndex = randomIntBetween(safeIndex, commitList.size() - 1);
long lower = Math.max(globalCheckpoint.get(), Long.parseLong(commitList.get(safeIndex).getUserData().get(SequenceNumbers.MAX_SEQ_NO)));
long upper = safeIndex == commitList.size() - 1 ? lastMaxSeqNo : Long.parseLong(commitList.get(safeIndex + 1).getUserData().get(SequenceNumbers.MAX_SEQ_NO)) - 1;
globalCheckpoint.set(randomLongBetween(lower, upper));
commitList.forEach(this::resetDeletion);
indexPolicy.onCommit(commitList);
IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get());
assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo(Math.max(NO_OPS_PERFORMED, Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))));
// Captures and releases some commits
int captures = between(0, 5);
for (int n = 0; n < captures; n++) {
boolean safe = randomBoolean();
final IndexCommit snapshot = indexPolicy.acquireIndexCommit(safe);
expectThrows(UnsupportedOperationException.class, snapshot::delete);
snapshottingCommits.add(snapshot);
if (safe) {
assertThat(snapshot.getUserData(), equalTo(commitList.get(safeIndex).getUserData()));
} else {
assertThat(snapshot.getUserData(), equalTo(commitList.get(commitList.size() - 1).getUserData()));
}
}
final List<IndexCommit> releasingSnapshots = randomSubsetOf(snapshottingCommits);
for (IndexCommit snapshot : releasingSnapshots) {
snapshottingCommits.remove(snapshot);
final long pendingSnapshots = snapshottingCommits.stream().filter(snapshot::equals).count();
final IndexCommit lastCommit = commitList.get(commitList.size() - 1);
safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get());
assertThat(indexPolicy.releaseCommit(snapshot), equalTo(pendingSnapshots == 0 && snapshot.equals(lastCommit) == false && snapshot.equals(safeCommit) == false));
}
// Snapshotting commits must not be deleted.
snapshottingCommits.forEach(snapshot -> assertThat(snapshot.isDeleted(), equalTo(false)));
// We don't need to retain translog for snapshotting commits.
assertThat(translogPolicy.getLocalCheckpointOfSafeCommit(), equalTo(getLocalCheckpoint(commitList.get(safeIndex))));
assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo(Math.max(NO_OPS_PERFORMED, Math.min(getLocalCheckpoint(commitList.get(safeIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))));
}
snapshottingCommits.forEach(indexPolicy::releaseCommit);
globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE));
commitList.forEach(this::resetDeletion);
indexPolicy.onCommit(commitList);
for (int i = 0; i < commitList.size() - 1; i++) {
assertThat(commitList.get(i).isDeleted(), equalTo(true));
}
assertThat(commitList.get(commitList.size() - 1).isDeleted(), equalTo(false));
assertThat(translogPolicy.getLocalCheckpointOfSafeCommit(), equalTo(getLocalCheckpoint(commitList.get(commitList.size() - 1))));
IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get());
assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo(Math.max(NO_OPS_PERFORMED, Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))));
}
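Acquiring and releasing index commits, as exercised above, corresponds to a pinning mechanism Lucene itself provides: SnapshotDeletionPolicy keeps a snapshotted commit alive until it is released, even after newer commits arrive. The following self-contained sketch shows that plain-Lucene mechanism rather than the engine's acquireIndexCommit/releaseCommit API, and assumes a recent Lucene:
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class SnapshotCommitExample {
    public static void main(String[] args) throws Exception {
        SnapshotDeletionPolicy snapshotPolicy =
            new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        IndexWriterConfig config = new IndexWriterConfig().setIndexDeletionPolicy(snapshotPolicy);
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, config)) {
            Document doc = new Document();
            doc.add(new StringField("id", "1", Field.Store.YES));
            writer.addDocument(doc);
            writer.commit();

            // Pin the current commit; the deletion policy may not remove it while it is held.
            IndexCommit snapshot = snapshotPolicy.snapshot();

            writer.addDocument(new Document());
            writer.commit(); // a newer commit now exists, but the snapshot remains readable

            try (DirectoryReader reader = DirectoryReader.open(snapshot)) {
                System.out.println("docs visible in snapshot: " + reader.numDocs()); // 1
            }

            snapshotPolicy.release(snapshot); // analogous to releasing an acquired commit
            writer.deleteUnusedFiles();       // lets Lucene clean up the now-unreferenced commit
        }
    }
}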
use of org.apache.lucene.index.IndexCommit in project crate by crate.
the class CombinedDeletionPolicyTests method testKeepCommitsAfterGlobalCheckpoint.
public void testKeepCommitsAfterGlobalCheckpoint() throws Exception {
final AtomicLong globalCheckpoint = new AtomicLong();
final int extraRetainedOps = between(0, 100);
final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, NO_OPS_PERFORMED, extraRetainedOps, () -> RetentionLeases.EMPTY);
TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy();
CombinedDeletionPolicy indexPolicy = newCombinedDeletionPolicy(translogPolicy, softDeletesPolicy, globalCheckpoint);
final LongArrayList maxSeqNoList = new LongArrayList();
final List<IndexCommit> commitList = new ArrayList<>();
int totalCommits = between(2, 20);
long lastMaxSeqNo = 0;
long lastCheckpoint = lastMaxSeqNo;
final UUID translogUUID = UUID.randomUUID();
for (int i = 0; i < totalCommits; i++) {
lastMaxSeqNo += between(1, 10000);
lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo);
commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID));
maxSeqNoList.add(lastMaxSeqNo);
}
int keptIndex = randomInt(commitList.size() - 1);
final long lower = maxSeqNoList.get(keptIndex);
final long upper = keptIndex == commitList.size() - 1 ? Long.MAX_VALUE : Math.max(maxSeqNoList.get(keptIndex), maxSeqNoList.get(keptIndex + 1) - 1);
globalCheckpoint.set(randomLongBetween(lower, upper));
indexPolicy.onCommit(commitList);
for (int i = 0; i < commitList.size(); i++) {
if (i < keptIndex) {
verify(commitList.get(i), times(1)).delete();
} else {
verify(commitList.get(i), never()).delete();
}
}
assertThat(translogPolicy.getLocalCheckpointOfSafeCommit(), equalTo(getLocalCheckpoint(commitList.get(keptIndex))));
assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo(Math.max(NO_OPS_PERFORMED, Math.min(getLocalCheckpoint(commitList.get(keptIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))));
}
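This test verifies that every commit older than the safe commit is deleted while the safe commit and everything after it survive. A trimmed-down sketch of a Lucene IndexDeletionPolicy with that retention rule is shown below; it assumes commits arrive oldest-to-newest and carry a max_seq_no entry in their user data, and it deliberately omits the translog and soft-deletes bookkeeping that the project's CombinedDeletionPolicy also performs.
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexDeletionPolicy;
import java.io.IOException;
import java.util.List;
import java.util.function.LongSupplier;

// Keeps the newest commit whose max_seq_no is covered by the global checkpoint,
// plus every later commit, and deletes everything older than that.
final class KeepSafeAndLaterCommitsPolicy extends IndexDeletionPolicy {

    private final LongSupplier globalCheckpoint;

    KeepSafeAndLaterCommitsPolicy(LongSupplier globalCheckpoint) {
        this.globalCheckpoint = globalCheckpoint;
    }

    @Override
    public void onInit(List<? extends IndexCommit> commits) throws IOException {
        onCommit(commits);
    }

    @Override
    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
        if (commits.isEmpty()) {
            return;
        }
        int keptIndex = 0; // fall back to the oldest commit if none is covered yet
        for (int i = 0; i < commits.size(); i++) {
            long maxSeqNo = Long.parseLong(commits.get(i).getUserData().get("max_seq_no")); // assumed key
            if (maxSeqNo <= globalCheckpoint.getAsLong()) {
                keptIndex = i; // commits arrive oldest to newest, so keep the latest match
            }
        }
        for (int i = 0; i < keptIndex; i++) {
            commits.get(i).delete(); // Lucene removes the files once nothing references them
        }
    }
}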
use of org.apache.lucene.index.IndexCommit in project crate by crate.
the class RecoverySourceHandlerTests method testThrowExceptionOnPrimaryRelocatedBeforePhase1Started.
@Test
public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOException {
final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
final StartRecoveryRequest request = getStartRecoveryRequest();
final IndexShard shard = mock(IndexShard.class);
when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class));
when(shard.isRelocatedPrimary()).thenReturn(true);
when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class));
doAnswer(invocation -> {
((ActionListener<Releasable>) invocation.getArguments()[0]).onResponse(() -> {
});
return null;
}).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject());
final IndexMetadata.Builder indexMetadata = IndexMetadata.builder("test").settings(Settings.builder()
    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, between(0, 5))
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5))
    .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean())
    .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersion(random()))
    .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())));
if (randomBoolean()) {
indexMetadata.state(IndexMetadata.State.CLOSE);
}
when(shard.indexSettings()).thenReturn(new IndexSettings(indexMetadata.build(), Settings.EMPTY));
final AtomicBoolean phase1Called = new AtomicBoolean();
final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
final AtomicBoolean phase2Called = new AtomicBoolean();
final RecoverySourceHandler handler = new RecoverySourceHandler(
    shard,
    mock(RecoveryTargetHandler.class),
    threadPool,
    request,
    Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
    between(1, 8),
    between(1, 8)) {
@Override
void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, ActionListener<SendFileResult> listener) {
phase1Called.set(true);
super.phase1(snapshot, startingSeqNo, translogOps, listener);
}
@Override
void prepareTargetForTranslog(int totalTranslogOps, ActionListener<TimeValue> listener) {
prepareTargetForTranslogCalled.set(true);
super.prepareTargetForTranslog(totalTranslogOps, listener);
}
@Override
void phase2(long startingSeqNo, long endingSeqNo, Translog.Snapshot snapshot, long maxSeenAutoIdTimestamp, long maxSeqNoOfUpdatesOrDeletes, RetentionLeases retentionLeases, long mappingVersion, ActionListener<SendSnapshotResult> listener) throws IOException {
phase2Called.set(true);
super.phase2(startingSeqNo, endingSeqNo, snapshot, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes, retentionLeases, mappingVersion, listener);
}
};
PlainActionFuture<RecoveryResponse> future = new PlainActionFuture<>();
expectThrows(IndexShardRelocatedException.class, () -> {
handler.recoverToTarget(future);
future.actionGet();
});
assertFalse(phase1Called.get());
assertFalse(prepareTargetForTranslogCalled.get());
assertFalse(phase2Called.get());
}
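One reusable detail in this test is the doAnswer stub that completes the acquirePrimaryOperationPermit callback immediately, so recoverToTarget can run far enough to hit the relocation check. Below is a small, self-contained illustration of that stubbing pattern with a hypothetical PermitSource interface standing in for IndexShard; it is not the project's API, just the Mockito technique:
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.function.Consumer;

public class CallbackStubExample {

    // Hypothetical collaborator: hands a permit to the callback once it is acquired.
    interface PermitSource {
        void acquirePermit(Consumer<AutoCloseable> onAcquired);
    }

    public static void main(String[] args) {
        PermitSource source = mock(PermitSource.class);
        // Complete the callback immediately with a no-op permit, mirroring how the test
        // stubs acquirePrimaryOperationPermit so the handler can proceed to its checks.
        doAnswer(invocation -> {
            Consumer<AutoCloseable> callback = invocation.getArgument(0);
            callback.accept(() -> { });
            return null;
        }).when(source).acquirePermit(any());

        source.acquirePermit(permit -> System.out.println("permit acquired: " + permit));
    }
}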