Use of org.elasticsearch.index.seqno.ReplicationTracker in project crate by crate.
Class EngineTestCase, method config.
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
                           ReferenceManager.RefreshListener externalRefreshListener,
                           ReferenceManager.RefreshListener internalRefreshListener,
                           @Nullable LongSupplier maybeGlobalCheckpointSupplier,
                           @Nullable Supplier<RetentionLeases> maybeRetentionLeasesSupplier) {
    IndexWriterConfig iwc = newIndexWriterConfig();
    TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
    Engine.EventListener eventListener = new Engine.EventListener() {
        @Override
        public void onFailedEngine(String reason, @Nullable Exception e) {
            // we don't need to notify anybody in this test
        }
    };
    final List<ReferenceManager.RefreshListener> extRefreshListenerList =
        externalRefreshListener == null ? emptyList() : Collections.singletonList(externalRefreshListener);
    final List<ReferenceManager.RefreshListener> intRefreshListenerList =
        internalRefreshListener == null ? emptyList() : Collections.singletonList(internalRefreshListener);
    final LongSupplier globalCheckpointSupplier;
    final Supplier<RetentionLeases> retentionLeasesSupplier;
    if (maybeGlobalCheckpointSupplier == null) {
        assert maybeRetentionLeasesSupplier == null;
        final ReplicationTracker replicationTracker = new ReplicationTracker(
            shardId, allocationId.getId(), indexSettings, randomNonNegativeLong(), SequenceNumbers.NO_OPS_PERFORMED,
            update -> {}, () -> 0L, (leases, listener) -> {}, () -> SafeCommitInfo.EMPTY);
        globalCheckpointSupplier = replicationTracker;
        retentionLeasesSupplier = replicationTracker::getRetentionLeases;
    } else {
        assert maybeRetentionLeasesSupplier != null;
        globalCheckpointSupplier = maybeGlobalCheckpointSupplier;
        retentionLeasesSupplier = maybeRetentionLeasesSupplier;
    }
    return new EngineConfig(
        shardId, allocationId.getId(), threadPool, indexSettings, store, mergePolicy, iwc.getAnalyzer(),
        new CodecService(null, logger), eventListener, IndexSearcher.getDefaultQueryCache(),
        IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5),
        extRefreshListenerList, intRefreshListenerList, new NoneCircuitBreakerService(),
        globalCheckpointSupplier, retentionLeasesSupplier, primaryTerm, tombstoneDocSupplier());
}
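For orientation, a minimal usage sketch of the helper above (hypothetical; INDEX_SETTINGS, store, translogDir and mergePolicy are assumed to be provided by the surrounding test fixture). Passing null for the last two parameters makes config() build its own ReplicationTracker, while supplying them explicitly pins the global checkpoint and retention leases:

// Hypothetical usage sketch; fixture variables are assumed, not defined here.
// Variant 1: let config() create an internal ReplicationTracker.
EngineConfig trackerBacked = config(INDEX_SETTINGS, store, translogDir, mergePolicy, null, null, null, null);

// Variant 2: pin the global checkpoint and retention leases explicitly.
AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
EngineConfig pinned = config(INDEX_SETTINGS, store, translogDir, mergePolicy, null, null,
                             globalCheckpoint::get, () -> RetentionLeases.EMPTY);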
Use of org.elasticsearch.index.seqno.ReplicationTracker in project crate by crate.
Class IndexShard, method resetEngineToGlobalCheckpoint.
/**
 * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
 */
void resetEngineToGlobalCheckpoint() throws IOException {
    assert Thread.holdsLock(mutex) == false : "resetting engine under mutex";
    assert getActiveOperationsCount() == OPERATIONS_BLOCKED
        : "resetting engine without blocking operations; active operations are [" + getActiveOperations() + ']';
    // persist the global checkpoint to disk
    sync();
    final SeqNoStats seqNoStats = seqNoStats();
    final TranslogStats translogStats = translogStats();
    // flush to make sure the latest commit, which will be opened by the read-only engine, includes all operations.
    flush(new FlushRequest().waitIfOngoing(true));
    SetOnce<Engine> newEngineReference = new SetOnce<>();
    final long globalCheckpoint = getLastKnownGlobalCheckpoint();
    assert globalCheckpoint == getLastSyncedGlobalCheckpoint();
    synchronized (engineMutex) {
        verifyNotClosed();
        // we must create both the new read-only engine and the new read-write engine under engineMutex to ensure
        // that snapshotStoreMetadata, acquireXXXCommit and close work.
        final Engine readOnlyEngine =
            new ReadOnlyEngine(newEngineConfig(replicationTracker), seqNoStats, translogStats, false, Function.identity()) {

            @Override
            public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    // ignore flushFirst since we flushed above and we do not want to interfere with ongoing translog replay
                    return newEngineReference.get().acquireLastIndexCommit(false);
                }
            }

            @Override
            public IndexCommitRef acquireSafeIndexCommit() {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    return newEngineReference.get().acquireSafeIndexCommit();
                }
            }

            @Override
            public void close() throws IOException {
                assert Thread.holdsLock(engineMutex);
                Engine newEngine = newEngineReference.get();
                if (newEngine == currentEngineReference.get()) {
                    // we successfully installed the new engine so do not close it.
                    newEngine = null;
                }
                IOUtils.close(super::close, newEngine);
            }
        };
        IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine));
        newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker)));
        onNewEngine(newEngineReference.get());
    }
    final Engine.TranslogRecoveryRunner translogRunner =
        (engine, snapshot) -> runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {
            // TODO: add a dedicated recovery stats counter for the reset translog
        });
    newEngineReference.get().recoverFromTranslog(translogRunner, globalCheckpoint);
    newEngineReference.get().refresh("reset_engine");
    synchronized (engineMutex) {
        verifyNotClosed();
        IOUtils.close(currentEngineReference.getAndSet(newEngineReference.get()));
        // We set active because we are now writing operations to the engine; this way,
        // if we go idle after some time and become inactive, we still give sync'd flush a chance to run.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
    // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    onSettingsChanged();
}
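The key point is that the ReplicationTracker, acting as the engine's global checkpoint supplier, bounds how much of the local translog is replayed. As a self-contained sketch in plain Java (not the Elasticsearch/CrateDB API): the global checkpoint is the minimum local checkpoint across in-sync copies, and only operations at or below it are replayed into the reset engine.

import java.util.List;

// Self-contained sketch: the global checkpoint is the minimum of the local checkpoints of all
// in-sync copies, and a reset engine only replays operations up to that bound.
public class GlobalCheckpointSketch {
    public static void main(String[] args) {
        List<Long> inSyncLocalCheckpoints = List.of(7L, 5L, 9L);  // primary and two replicas
        long globalCheckpoint = inSyncLocalCheckpoints.stream().mapToLong(Long::longValue).min().orElse(-1L);
        long[] translogSeqNos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
        for (long seqNo : translogSeqNos) {
            if (seqNo <= globalCheckpoint) {
                System.out.println("replay seq_no " + seqNo);     // kept: at or below the global checkpoint
            } else {
                System.out.println("discard seq_no " + seqNo);    // above the global checkpoint, not replayed
            }
        }
    }
}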
Use of org.elasticsearch.index.seqno.ReplicationTracker in project crate by crate.
Class NoOpEngineTests, method testTrimUnreferencedTranslogFiles.
@Test
public void testTrimUnreferencedTranslogFiles() throws Exception {
    final ReplicationTracker tracker = (ReplicationTracker) engine.config().getGlobalCheckpointSupplier();
    ShardRouting routing = TestShardRouting.newShardRouting(
        "test", shardId.id(), "node", null, true, ShardRoutingState.STARTED, allocationId);
    IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
    tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table);
    tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
    engine.onSettingsChanged(TimeValue.MINUS_ONE, ByteSizeValue.ZERO, randomNonNegativeLong());

    final int numDocs = scaledRandomIntBetween(10, 3000);
    int totalTranslogOps = 0;
    for (int i = 0; i < numDocs; i++) {
        totalTranslogOps++;
        engine.index(indexForDoc(createParsedDoc(Integer.toString(i), null)));
        tracker.updateLocalCheckpoint(allocationId.getId(), i);
        if (rarely()) {
            totalTranslogOps = 0;
            engine.flush();
        }
        if (randomBoolean()) {
            engine.rollTranslogGeneration();
        }
    }
    // prevent translog from trimming so we can test trimUnreferencedFiles in NoOpEngine.
    final Translog.Snapshot snapshot = engine.getTranslog().newSnapshot();
    engine.flush(true, true);
    engine.close();

    final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker));
    assertThat(noOpEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(totalTranslogOps));
    noOpEngine.trimUnreferencedTranslogFiles();
    assertThat(noOpEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(0));
    assertThat(noOpEngine.getTranslogStats().getUncommittedOperations(), equalTo(0));
    assertThat(noOpEngine.getTranslogStats().getTranslogSizeInBytes(), equalTo((long) Translog.DEFAULT_HEADER_SIZE_IN_BYTES));
    snapshot.close();
    noOpEngine.close();
}
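To illustrate what trimUnreferencedTranslogFiles is expected to accomplish, here is a self-contained sketch (illustrative only, not the Translog API): once the last commit references a given translog generation, every older generation is unreferenced and can be deleted, which is why the test ends up with nothing but the header-sized current file.

import java.util.ArrayList;
import java.util.List;

// Self-contained sketch: drop every translog generation older than the one the last commit references.
public class TrimTranslogSketch {
    public static void main(String[] args) {
        List<Long> generations = new ArrayList<>(List.of(1L, 2L, 3L, 4L, 5L));
        long minReferencedGeneration = 4L;                            // e.g. recorded in the last Lucene commit
        generations.removeIf(gen -> gen < minReferencedGeneration);   // analogue of trimUnreferencedTranslogFiles
        System.out.println("remaining generations: " + generations);  // [4, 5]
    }
}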
Use of org.elasticsearch.index.seqno.ReplicationTracker in project crate by crate.
Class NoOpEngineTests, method testNoopAfterRegularEngine.
@Test
public void testNoopAfterRegularEngine() throws IOException {
    int docs = randomIntBetween(1, 10);
    ReplicationTracker tracker = (ReplicationTracker) engine.config().getGlobalCheckpointSupplier();
    ShardRouting routing = TestShardRouting.newShardRouting(
        "test", shardId.id(), "node", null, true, ShardRoutingState.STARTED, allocationId);
    IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
    tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table);
    tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
    for (int i = 0; i < docs; i++) {
        ParsedDocument doc = testParsedDocument("" + i, null, testDocumentWithTextField(), B_1, null);
        engine.index(indexForDoc(doc));
        tracker.updateLocalCheckpoint(allocationId.getId(), i);
    }
    engine.flush(true, true);

    long localCheckpoint = engine.getPersistedLocalCheckpoint();
    long maxSeqNo = engine.getSeqNoStats(100L).getMaxSeqNo();
    engine.close();

    final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker));
    assertThat(noOpEngine.getPersistedLocalCheckpoint(), equalTo(localCheckpoint));
    assertThat(noOpEngine.getSeqNoStats(100L).getMaxSeqNo(), equalTo(maxSeqNo));
    try (Engine.IndexCommitRef ref = noOpEngine.acquireLastIndexCommit(false)) {
        try (IndexReader reader = DirectoryReader.open(ref.getIndexCommit())) {
            assertThat(reader.numDocs(), equalTo(docs));
        }
    }
    noOpEngine.close();
}
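Both NoOpEngine tests repeat the same tracker wiring before indexing. A hypothetical helper one might extract (not part of the actual test classes; it only reuses calls shown above) makes the pattern explicit: publish a single-shard routing table to the tracker, then activate primary mode so local checkpoint updates are accepted.

// Hypothetical extraction of the shared setup; assumes the same imports as the tests above.
private static void activateSinglePrimary(ReplicationTracker tracker, ShardId shardId, AllocationId allocationId) {
    // a started primary shard routing for this shard, assigned to a single node
    ShardRouting routing = TestShardRouting.newShardRouting(
        "test", shardId.id(), "node", null, true, ShardRoutingState.STARTED, allocationId);
    IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
    // tell the tracker about the routing table, then switch it into primary mode
    tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table);
    tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
}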
Use of org.elasticsearch.index.seqno.ReplicationTracker in project crate by crate.
Class InternalEngineTests, method testSeqNoAndCheckpoints.
@Test
public void testSeqNoAndCheckpoints() throws IOException, InterruptedException {
    final int opCount = randomIntBetween(1, 256);
    long primarySeqNo = SequenceNumbers.NO_OPS_PERFORMED;
    final String[] ids = new String[] { "1", "2", "3" };
    final Set<String> indexedIds = new HashSet<>();
    long localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
    long replicaLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
    final long globalCheckpoint;
    long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
    IOUtils.close(store, engine);
    store = createStore();
    InternalEngine initialEngine = null;
    try {
        initialEngine = createEngine(defaultSettings, store, createTempDir(), newLogMergePolicy(), null);
        final ShardRouting primary = TestShardRouting.newShardRouting(
            "test", shardId.id(), "node1", null, true, ShardRoutingState.STARTED, allocationId);
        final ShardRouting initializingReplica =
            TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.INITIALIZING);
        ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier();
        gcpTracker.updateFromMaster(
            1L,
            new HashSet<>(Collections.singletonList(primary.allocationId().getId())),
            new IndexShardRoutingTable.Builder(shardId).addShard(primary).build());
        gcpTracker.activatePrimaryMode(primarySeqNo);
        if (defaultSettings.isSoftDeleteEnabled()) {
            final CountDownLatch countDownLatch = new CountDownLatch(1);
            gcpTracker.addPeerRecoveryRetentionLease(
                initializingReplica.currentNodeId(),
                SequenceNumbers.NO_OPS_PERFORMED,
                ActionListener.wrap(countDownLatch::countDown));
            countDownLatch.await(5, TimeUnit.SECONDS);
        }
        gcpTracker.updateFromMaster(
            2L,
            new HashSet<>(Collections.singletonList(primary.allocationId().getId())),
            new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(initializingReplica).build());
        gcpTracker.initiateTracking(initializingReplica.allocationId().getId());
        gcpTracker.markAllocationIdAsInSync(initializingReplica.allocationId().getId(), replicaLocalCheckpoint);
        final ShardRouting replica = initializingReplica.moveToStarted();
        gcpTracker.updateFromMaster(
            3L,
            new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())),
            new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build());

        for (int op = 0; op < opCount; op++) {
            final String id;
            // mostly index, sometimes delete
            if (rarely() && indexedIds.isEmpty() == false) {
                // we have some docs indexed, so delete one of them
                id = randomFrom(indexedIds);
                final Engine.Delete delete = new Engine.Delete(
                    id, newUid(id), UNASSIGNED_SEQ_NO, primaryTerm.get(), rarely() ? 100 : Versions.MATCH_ANY,
                    VersionType.INTERNAL, PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
                final Engine.DeleteResult result = initialEngine.delete(delete);
                if (result.getResultType() == Engine.Result.Type.SUCCESS) {
                    assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo + 1));
                    indexedIds.remove(id);
                    primarySeqNo++;
                } else {
                    assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo));
                }
            } else {
                // index a document
                id = randomFrom(ids);
                ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null);
                final Engine.Index index = new Engine.Index(
                    newUid(doc), doc, UNASSIGNED_SEQ_NO, primaryTerm.get(), rarely() ? 100 : Versions.MATCH_ANY,
                    VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
                final Engine.IndexResult result = initialEngine.index(index);
                if (result.getResultType() == Engine.Result.Type.SUCCESS) {
                    assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo + 1));
                    indexedIds.add(id);
                    primarySeqNo++;
                } else {
                    assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo));
                }
            }
            // to advance the persisted local checkpoint
            initialEngine.syncTranslog();
            if (randomInt(10) < 3) {
                // only update rarely as we do it every doc
                replicaLocalCheckpoint = randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo));
            }
            gcpTracker.updateLocalCheckpoint(primary.allocationId().getId(), initialEngine.getPersistedLocalCheckpoint());
            gcpTracker.updateLocalCheckpoint(initializingReplica.allocationId().getId(), replicaLocalCheckpoint);
            if (rarely()) {
                localCheckpoint = primarySeqNo;
                maxSeqNo = primarySeqNo;
                initialEngine.flush(true, true);
            }
        }

        logger.info("localcheckpoint {}, global {}", replicaLocalCheckpoint, primarySeqNo);
        globalCheckpoint = gcpTracker.getGlobalCheckpoint();

        assertEquals(primarySeqNo, initialEngine.getSeqNoStats(-1).getMaxSeqNo());
        assertEquals(primarySeqNo, initialEngine.getPersistedLocalCheckpoint());
        assertThat(globalCheckpoint, equalTo(replicaLocalCheckpoint));
        assertThat(
            Long.parseLong(initialEngine.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)),
            equalTo(localCheckpoint));
        // to guarantee the global checkpoint is written to the translog checkpoint
        initialEngine.getTranslog().sync();
        assertThat(initialEngine.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(globalCheckpoint));
        assertThat(
            Long.parseLong(initialEngine.commitStats().getUserData().get(SequenceNumbers.MAX_SEQ_NO)),
            equalTo(maxSeqNo));
    } finally {
        IOUtils.close(initialEngine);
    }

    try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())) {
        recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
        assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo());
        assertThat(
            Long.parseLong(recoveringEngine.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)),
            equalTo(primarySeqNo));
        assertThat(recoveringEngine.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(globalCheckpoint));
        assertThat(
            Long.parseLong(recoveringEngine.commitStats().getUserData().get(SequenceNumbers.MAX_SEQ_NO)),
            // all data we have assigned sequence numbers to should be in the commit
            equalTo(primarySeqNo));
        assertThat(recoveringEngine.getProcessedLocalCheckpoint(), equalTo(primarySeqNo));
        assertThat(recoveringEngine.getPersistedLocalCheckpoint(), equalTo(primarySeqNo));
        assertThat(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo));
        assertThat(generateNewSeqNo(recoveringEngine), equalTo(primarySeqNo + 1));
    }
}
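The assertions above encode the usual ordering between the three sequence-number markers. As a self-contained reminder in plain Java (not the engine API): the max sequence number is at least the primary's local checkpoint, which is at least the global checkpoint, and the global checkpoint follows the slowest in-sync copy, which is why the test expects it to equal replicaLocalCheckpoint.

// Self-contained sketch of the invariant the test exercises; the concrete values are made up.
public class CheckpointInvariantSketch {
    public static void main(String[] args) {
        long primaryLocalCheckpoint = 41L;   // every op up to 41 persisted on the primary
        long replicaLocalCheckpoint = 17L;   // the replica lags behind
        long maxSeqNo = 41L;                 // highest sequence number ever assigned
        long globalCheckpoint = Math.min(primaryLocalCheckpoint, replicaLocalCheckpoint);
        assert maxSeqNo >= primaryLocalCheckpoint;
        assert primaryLocalCheckpoint >= globalCheckpoint;
        System.out.println("global checkpoint = " + globalCheckpoint); // 17, matching the replica
    }
}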