Use of org.apache.lucene.index.IndexCommit in project elasticsearch by elastic.
Class IndexShard, method snapshotStoreMetadata.
/**
 * Gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call at any point in the
 * index shard's lifecycle, without having to worry about the current state of the engine or concurrent flushes.
 *
 * @throws org.apache.lucene.index.IndexNotFoundException if no index is found in the current directory
 * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an
 *                               unexpected exception when opening the index or reading the segments file.
 * @throws IndexFormatTooOldException if the lucene index is too old to be opened.
 * @throws IndexFormatTooNewException if the lucene index is too new to be opened.
 * @throws FileNotFoundException if one or more files referenced by a commit are not present.
 * @throws NoSuchFileException if one or more files referenced by a commit are not present.
 */
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
    IndexCommit indexCommit = null;
    store.incRef();
    try {
        synchronized (mutex) {
            // if the engine is not running, we can access the store directly, but we need to make sure no one starts
            // the engine on us. If the engine is running, we can get a snapshot via the deletion policy, which is
            // initialized by then; that can be done outside the mutex, since the engine may be closed halfway through.
            Engine engine = getEngineOrNull();
            if (engine == null) {
                try (Lock ignored = store.directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                    return store.getMetadata(null);
                }
            }
        }
        indexCommit = deletionPolicy.snapshot();
        return store.getMetadata(indexCommit);
    } finally {
        store.decRef();
        if (indexCommit != null) {
            deletionPolicy.release(indexCommit);
        }
    }
}
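For context, a minimal caller-side sketch of what a metadata snapshot obtained this way is typically used for: diffing the source shard's files against a recovery target's files. Store.RecoveryDiff and StoreFileMetaData are the actual elasticsearch types; the printRecoveryPlan method and its use of System.out are illustrative assumptions, not code from the project.

// Hypothetical caller: diff local commit metadata against a recovery target's
// metadata to decide which files can be reused and which must be copied.
void printRecoveryPlan(IndexShard shard, Store.MetadataSnapshot targetSnapshot) throws IOException {
    Store.MetadataSnapshot sourceSnapshot = shard.snapshotStoreMetadata();
    Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
    for (StoreFileMetaData meta : diff.identical) {
        System.out.println("reuse   " + meta.name());
    }
    for (StoreFileMetaData meta : diff.different) {
        System.out.println("replace " + meta.name());
    }
    for (StoreFileMetaData meta : diff.missing) {
        System.out.println("copy    " + meta.name());
    }
}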
Use of org.apache.lucene.index.IndexCommit in project elasticsearch by elastic.
Class InternalEngineTests, method testConcurrentWritesAndCommits.
// this test writes documents to the engine while concurrently flushing/committing,
// and ensures that the commit points contain the correct sequence-number data
public void testConcurrentWritesAndCommits() throws Exception {
    try (Store store = createStore();
         InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(),
             new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
        final int numIndexingThreads = scaledRandomIntBetween(3, 6);
        final int numDocsPerThread = randomIntBetween(500, 1000);
        final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
        final List<Thread> indexingThreads = new ArrayList<>();
        // create N indexing threads to index documents simultaneously
        for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
            final int threadIdx = threadNum;
            Thread indexingThread = new Thread(() -> {
                try {
                    // wait for all threads to start at the same time
                    barrier.await();
                    // index random number of docs
                    for (int i = 0; i < numDocsPerThread; i++) {
                        final String id = "thread" + threadIdx + "#" + i;
                        ParsedDocument doc = testParsedDocument(id, "test", null, testDocument(), B_1, null);
                        engine.index(indexForDoc(doc));
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            indexingThreads.add(indexingThread);
        }
        // start the indexing threads
        for (Thread thread : indexingThreads) {
            thread.start();
        }
        // wait for indexing threads to all be ready to start
        barrier.await();
        // create random commit points
        boolean doneIndexing;
        do {
            doneIndexing = indexingThreads.stream().filter(Thread::isAlive).count() == 0;
            //engine.flush(); // flush and commit
        } while (doneIndexing == false);
        // now, verify all the commits have the correct docs according to the user commit data
        long prevLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED;
        long prevMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
        for (IndexCommit commit : DirectoryReader.listCommits(store.directory())) {
            Map<String, String> userData = commit.getUserData();
            long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY)
                ? Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))
                : SequenceNumbersService.NO_OPS_PERFORMED;
            long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO)
                ? Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO))
                : SequenceNumbersService.UNASSIGNED_SEQ_NO;
            // local checkpoint and max seq no shouldn't go backwards
            assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint));
            assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo));
            try (IndexReader reader = DirectoryReader.open(commit)) {
                FieldStats stats = SeqNoFieldMapper.SeqNoDefaults.FIELD_TYPE.stats(reader);
                final long highestSeqNo;
                if (stats != null) {
                    highestSeqNo = (long) stats.getMaxValue();
                } else {
                    highestSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
                }
                // make sure localCheckpoint <= highest seq no found <= maxSeqNo
                assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint));
                assertThat(highestSeqNo, lessThanOrEqualTo(maxSeqNo));
                // make sure all sequence numbers up to and including the local checkpoint are in the index
                FixedBitSet seqNosBitSet = getSeqNosSet(reader, highestSeqNo);
                for (int i = 0; i <= localCheckpoint; i++) {
                    assertTrue("local checkpoint [" + localCheckpoint + "], _seq_no [" + i + "] should be indexed",
                        seqNosBitSet.get(i));
                }
            }
            prevLocalCheckpoint = localCheckpoint;
            prevMaxSeqNo = maxSeqNo;
        }
    }
}
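The getSeqNosSet helper used above is not part of this listing. Below is a minimal sketch of what it needs to do, assuming _seq_no is indexed as a numeric doc-values field and using the random-access NumericDocValues.get API of the Lucene 6 line this code builds against; the actual helper in InternalEngineTests may differ in details.

// Collect every _seq_no present in the reader into a bit set, so the test can
// verify that all sequence numbers up to the local checkpoint were indexed.
private static FixedBitSet getSeqNosSet(final IndexReader reader, final long highestSeqNo) throws IOException {
    final FixedBitSet bitSet = new FixedBitSet((int) highestSeqNo + 1);
    for (LeafReaderContext leaf : reader.leaves()) {
        final NumericDocValues values = leaf.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
        if (values == null) {
            continue;
        }
        final Bits liveDocs = leaf.reader().getLiveDocs();
        for (int docId = 0; docId < leaf.reader().maxDoc(); docId++) {
            if (liveDocs == null || liveDocs.get(docId)) {
                bitSet.set((int) values.get(docId)); // mark this sequence number as seen
            }
        }
    }
    return bitSet;
}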
Use of org.apache.lucene.index.IndexCommit in project elasticsearch by elastic.
Class RecoverySourceHandlerTests, method testThrowExceptionOnPrimaryRelocatedBeforePhase1Completed.
public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Completed() throws IOException {
    final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
    final boolean attemptSequenceNumberBasedRecovery = randomBoolean();
    final boolean isTranslogReadyForSequenceNumberBasedRecovery = attemptSequenceNumberBasedRecovery && randomBoolean();
    final StartRecoveryRequest request = new StartRecoveryRequest(
        shardId,
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        null,
        false,
        randomNonNegativeLong(),
        attemptSequenceNumberBasedRecovery ? randomNonNegativeLong() : SequenceNumbersService.UNASSIGNED_SEQ_NO);
    final IndexShard shard = mock(IndexShard.class);
    when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class));
    when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class));
    final Translog.View translogView = mock(Translog.View.class);
    when(shard.acquireTranslogView()).thenReturn(translogView);
    when(shard.state()).thenReturn(IndexShardState.RELOCATED);
    final AtomicBoolean phase1Called = new AtomicBoolean();
    final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
    final AtomicBoolean phase2Called = new AtomicBoolean();
    final RecoverySourceHandler handler = new RecoverySourceHandler(shard, mock(RecoveryTargetHandler.class), request,
        () -> 0L, e -> () -> { }, recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
        @Override
        boolean isTranslogReadyForSequenceNumberBasedRecovery(final Translog.View translogView) {
            return isTranslogReadyForSequenceNumberBasedRecovery;
        }

        @Override
        public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
            phase1Called.set(true);
        }

        @Override
        void prepareTargetForTranslog(final int totalTranslogOps, final long maxUnsafeAutoIdTimestamp) throws IOException {
            prepareTargetForTranslogCalled.set(true);
        }

        @Override
        void phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException {
            phase2Called.set(true);
        }
    };
    expectThrows(IndexShardRelocatedException.class, handler::recoverToTarget);
    // phase1 should only be attempted if we are not doing a sequence-number-based recovery
    assertThat(phase1Called.get(), equalTo(!isTranslogReadyForSequenceNumberBasedRecovery));
    assertTrue(prepareTargetForTranslogCalled.get());
    assertFalse(phase2Called.get());
}
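The phase1 override above discards the IndexCommit snapshot it receives; in the real handler, that snapshot is the point-in-time list of segment files to replicate to the recovery target. A minimal sketch of inspecting such a snapshot with the plain Lucene IndexCommit API (describeCommit is a hypothetical name, not the handler's implementation):

// An IndexCommit pins a point-in-time set of segment files; phase1-style file
// copying starts from exactly this list.
void describeCommit(IndexCommit snapshot) throws IOException {
    System.out.println("segments file: " + snapshot.getSegmentsFileName());
    System.out.println("generation:    " + snapshot.getGeneration());
    for (String fileName : snapshot.getFileNames()) {
        System.out.println("  " + fileName);
    }
}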
Use of org.apache.lucene.index.IndexCommit in project elasticsearch by elastic.
Class RecoverySourceHandlerTests, method testWaitForClusterStateOnPrimaryRelocation.
public void testWaitForClusterStateOnPrimaryRelocation() throws IOException, InterruptedException {
    final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
    final boolean attemptSequenceNumberBasedRecovery = randomBoolean();
    final boolean isTranslogReadyForSequenceNumberBasedRecovery = attemptSequenceNumberBasedRecovery && randomBoolean();
    final StartRecoveryRequest request = new StartRecoveryRequest(
        shardId,
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        null,
        true,
        randomNonNegativeLong(),
        attemptSequenceNumberBasedRecovery ? randomNonNegativeLong() : SequenceNumbersService.UNASSIGNED_SEQ_NO);
    final AtomicBoolean phase1Called = new AtomicBoolean();
    final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
    final AtomicBoolean phase2Called = new AtomicBoolean();
    final AtomicBoolean ensureClusterStateVersionCalled = new AtomicBoolean();
    final AtomicBoolean recoveriesDelayed = new AtomicBoolean();
    final AtomicBoolean relocated = new AtomicBoolean();
    final IndexShard shard = mock(IndexShard.class);
    when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class));
    when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class));
    final Translog.View translogView = mock(Translog.View.class);
    when(shard.acquireTranslogView()).thenReturn(translogView);
    when(shard.state()).then(i -> relocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED);
    doAnswer(i -> {
        relocated.set(true);
        assertTrue(recoveriesDelayed.get());
        return null;
    }).when(shard).relocated(any(String.class));
    final Supplier<Long> currentClusterStateVersionSupplier = () -> {
        assertFalse(ensureClusterStateVersionCalled.get());
        assertTrue(recoveriesDelayed.get());
        ensureClusterStateVersionCalled.set(true);
        return 0L;
    };
    final Function<String, Releasable> delayNewRecoveries = s -> {
        assertThat(phase1Called.get(), equalTo(!isTranslogReadyForSequenceNumberBasedRecovery));
        assertTrue(prepareTargetForTranslogCalled.get());
        assertTrue(phase2Called.get());
        assertFalse(recoveriesDelayed.get());
        recoveriesDelayed.set(true);
        return () -> {
            assertTrue(recoveriesDelayed.get());
            recoveriesDelayed.set(false);
        };
    };
    final RecoverySourceHandler handler = new RecoverySourceHandler(shard, mock(RecoveryTargetHandler.class), request,
        currentClusterStateVersionSupplier, delayNewRecoveries, recoverySettings.getChunkSize().bytesAsInt(),
        Settings.EMPTY) {
        @Override
        boolean isTranslogReadyForSequenceNumberBasedRecovery(final Translog.View translogView) {
            return isTranslogReadyForSequenceNumberBasedRecovery;
        }

        @Override
        public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
            phase1Called.set(true);
        }

        @Override
        void prepareTargetForTranslog(final int totalTranslogOps, final long maxUnsafeAutoIdTimestamp) throws IOException {
            prepareTargetForTranslogCalled.set(true);
        }

        @Override
        void phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException {
            phase2Called.set(true);
        }
    };
    handler.recoverToTarget();
    assertTrue(ensureClusterStateVersionCalled.get());
    // phase1 should only be attempted if we are not doing a sequence-number-based recovery
    assertThat(phase1Called.get(), equalTo(!isTranslogReadyForSequenceNumberBasedRecovery));
    assertTrue(prepareTargetForTranslogCalled.get());
    assertTrue(phase2Called.get());
    assertTrue(relocated.get());
    assertFalse(recoveriesDelayed.get());
}
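The delayNewRecoveries function above models a delay/resume handshake: the handler blocks new recoveries before ensuring the cluster state version and releases the block once relocation completes. A minimal sketch of that Releasable contract under the same assumptions the test makes; RecoveryGate is a hypothetical illustration class, not an elasticsearch type.

// Hypothetical illustration of the Releasable-based delay contract: new
// recoveries stay blocked while the returned handle is open.
class RecoveryGate {
    private final AtomicBoolean delayed = new AtomicBoolean();

    Releasable delayNewRecoveries(String reason) {
        if (delayed.compareAndSet(false, true) == false) {
            throw new IllegalStateException("recoveries already delayed");
        }
        // Releasable has a single abstract method, so a lambda suffices
        return () -> delayed.set(false);
    }

    boolean isDelayed() {
        return delayed.get();
    }
}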
Use of org.apache.lucene.index.IndexCommit in project lucene-solr by apache.
Class SolrDeletionPolicy, method updateCommits.
private void updateCommits(List<? extends IndexCommit> commits) {
    synchronized (this) {
        long maxCommitAgeTimeStamp = -1L;
        IndexCommit newest = commits.get(commits.size() - 1);
        log.debug("newest commit generation = " + newest.getGeneration());
        int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
        int totalKept = 1;
        // work our way from newest to oldest, skipping the first since we always want to keep it.
        for (int i = commits.size() - 2; i >= 0; i--) {
            IndexCommit commit = commits.get(i);
            // delete anything too old, regardless of other policies
            try {
                if (maxCommitAge != null) {
                    if (maxCommitAgeTimeStamp == -1) {
                        DateMathParser dmp = new DateMathParser(DateMathParser.UTC);
                        maxCommitAgeTimeStamp = dmp.parseMath(maxCommitAge).getTime();
                    }
                    if (IndexDeletionPolicyWrapper.getCommitTimestamp(commit) < maxCommitAgeTimeStamp) {
                        commit.delete();
                        continue;
                    }
                }
            } catch (Exception e) {
                log.warn("Exception while checking commit point's age for deletion", e);
            }
            if (singleSegKept < maxOptimizedCommitsToKeep && commit.getSegmentCount() == 1) {
                totalKept++;
                singleSegKept++;
                continue;
            }
            if (totalKept < maxCommitsToKeep) {
                totalKept++;
                continue;
            }
            commit.delete();
        }
    } // end synchronized
}
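For comparison with the custom Solr policy above, stock Lucene keeps an IndexCommit alive while it is being read via SnapshotDeletionPolicy, the same mechanism the elasticsearch snippets wrap. A minimal sketch using only standard Lucene APIs; the index path and analyzer choice are assumptions.

// Pin a commit with SnapshotDeletionPolicy so concurrent commits cannot
// delete its files, then release it when done.
SnapshotDeletionPolicy policy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer()).setIndexDeletionPolicy(policy);
try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
     IndexWriter writer = new IndexWriter(dir, config)) {
    writer.commit();                        // make sure there is a commit to snapshot
    IndexCommit commit = policy.snapshot(); // pin the latest commit
    try {
        for (String fileName : commit.getFileNames()) {
            System.out.println(fileName);   // e.g. copy these files for a backup
        }
    } finally {
        policy.release(commit);             // allow the deletion policy to reclaim it
        writer.deleteUnusedFiles();
    }
}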