Use of org.opensearch.cluster.routing.ShardRouting in project OpenSearch by opensearch-project.
The class IndexShardTestCase, method recoverShardFromSnapshot.
/**
 * Recover a shard from a snapshot using a given repository.
 */
protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot snapshot, final Repository repository) {
    final Version version = Version.CURRENT;
    final ShardId shardId = shard.shardId();
    final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID());
    final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId());
    // an INITIALIZING primary routing whose recovery source points at the snapshot to restore
    final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, version, indexId);
    final ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource);
    shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null));
    // restore the shard files from the repository and block until the restore completes
    final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
    repository.restoreShard(shard.store(), snapshot.getSnapshotId(), indexId, shard.shardId(), shard.recoveryState(), future);
    future.actionGet();
}
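The ShardRouting-specific part of the helper above is the INITIALIZING primary routing built around a SnapshotRecoverySource. As a rough, self-contained sketch (not part of IndexShardTestCase; the index name, node id, and repository/snapshot names are made up, and the types are the same org.opensearch classes used in the snippet above), the same routing can be constructed and inspected on its own:

// hedged sketch for illustration only
ShardId shardId = new ShardId(new Index("test", UUIDs.randomBase64UUID()), 0);
IndexId indexId = new IndexId("test", UUIDs.randomBase64UUID());
Snapshot snapshot = new Snapshot("repo", new SnapshotId("snap", UUIDs.randomBase64UUID()));
RecoverySource.SnapshotRecoverySource recoverySource =
    new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, Version.CURRENT, indexId);
ShardRouting routing = TestShardRouting.newShardRouting(shardId, "node1", true, ShardRoutingState.INITIALIZING, recoverySource);
assert routing.primary();
assert routing.initializing();
assert routing.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;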
Use of org.opensearch.cluster.routing.ShardRouting in project OpenSearch by opensearch-project.
The class InternalTestCluster, method assertSeqNos.
public void assertSeqNos() throws Exception {
    assertBusy(() -> {
        final ClusterState state = clusterService().state();
        for (ObjectObjectCursor<String, IndexRoutingTable> indexRoutingTable : state.routingTable().indicesRouting()) {
            for (IntObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : indexRoutingTable.value.shards()) {
                ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard();
                final IndexShard primaryShard = getShardOrNull(state, primaryShardRouting);
                if (primaryShard == null) {
                    // just ignore - shard movement
                    continue;
                }
                final SeqNoStats primarySeqNoStats;
                final ObjectLongMap<String> syncGlobalCheckpoints;
                try {
                    primarySeqNoStats = primaryShard.seqNoStats();
                    syncGlobalCheckpoints = primaryShard.getInSyncGlobalCheckpoints();
                } catch (AlreadyClosedException ex) {
                    // shard is closed - just ignore
                    continue;
                }
                assertThat(primaryShardRouting + " should have set the global checkpoint", primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)));
                for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) {
                    final IndexShard replicaShard = getShardOrNull(state, replicaShardRouting);
                    if (replicaShard == null) {
                        // just ignore - shard movement
                        continue;
                    }
                    final SeqNoStats seqNoStats;
                    try {
                        seqNoStats = replicaShard.seqNoStats();
                    } catch (AlreadyClosedException e) {
                        // shard is closed - just ignore
                        continue;
                    }
                    assertThat(replicaShardRouting + " seq_no_stats mismatch", seqNoStats, equalTo(primarySeqNoStats));
                    // the local knowledge on the primary of the global checkpoint equals the global checkpoint on the shard
                    assertThat(replicaShardRouting + " global checkpoint syncs mismatch", seqNoStats.getGlobalCheckpoint(), equalTo(syncGlobalCheckpoints.get(replicaShardRouting.allocationId().getId())));
                }
            }
        }
    }, 30, TimeUnit.SECONDS);
}
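The assertion above keys the primary's in-sync global checkpoints by allocation id. A small, hypothetical helper (not part of InternalTestCluster; the name startedCopiesByAllocationId is invented for illustration) that mirrors that lookup by collecting the started copies of one index keyed by allocation id could look like this:

// hypothetical helper; RoutingTable#allShards(String) and ShardRouting#allocationId()
// are existing APIs, the latter being the same accessor used in assertSeqNos above
static Map<String, ShardRouting> startedCopiesByAllocationId(ClusterState state, String index) {
    Map<String, ShardRouting> copies = new HashMap<>();
    for (ShardRouting routing : state.routingTable().allShards(index)) {
        if (routing.started()) {
            copies.put(routing.allocationId().getId(), routing);
        }
    }
    return copies;
}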
Use of org.opensearch.cluster.routing.ShardRouting in project OpenSearch by opensearch-project.
The class TestGatewayAllocator, method applyFailedShards.
@Override
public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
    currentNodes = allocation.nodes();
    for (FailedShard failedShard : failedShards) {
        final ShardRouting failedRouting = failedShard.getRoutingEntry();
        // forget the failed copy so the allocator no longer reports it on its old node
        Map<ShardId, ShardRouting> nodeAllocations = knownAllocations.get(failedRouting.currentNodeId());
        if (nodeAllocations != null) {
            nodeAllocations.remove(failedRouting.shardId());
            if (nodeAllocations.isEmpty()) {
                knownAllocations.remove(failedRouting.currentNodeId());
            }
        }
    }
}
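For context, a FailedShard entry pairs the failed ShardRouting with the failure details. A hedged sketch of feeding one copy into the allocator above; the routing, allocation, and allocator instances are assumed to come from the surrounding test fixture, and the message and exception are made up:

// sketch only: failedRouting, allocation and testGatewayAllocator are assumed test-fixture objects
FailedShard failed = new FailedShard(failedRouting, "simulated disk failure", new IOException("boom"), true);
testGatewayAllocator.applyFailedShards(Collections.singletonList(failed), allocation);
// afterwards the allocator no longer lists a known copy of failedRouting.shardId()
// on failedRouting.currentNodeId()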
Use of org.opensearch.cluster.routing.ShardRouting in project OpenSearch by opensearch-project.
The class InternalEngineTests, method testSeqNoAndCheckpoints.
public void testSeqNoAndCheckpoints() throws IOException, InterruptedException {
    final int opCount = randomIntBetween(1, 256);
    long primarySeqNo = SequenceNumbers.NO_OPS_PERFORMED;
    final String[] ids = new String[] { "1", "2", "3" };
    final Set<String> indexedIds = new HashSet<>();
    long localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
    long replicaLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
    final long globalCheckpoint;
    long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
    IOUtils.close(store, engine);
    store = createStore();
    InternalEngine initialEngine = null;
    try {
        initialEngine = createEngine(defaultSettings, store, createTempDir(), newLogMergePolicy(), null);
        final ShardRouting primary = TestShardRouting.newShardRouting("test", shardId.id(), "node1", null, true, ShardRoutingState.STARTED, allocationId);
        final ShardRouting initializingReplica = TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.INITIALIZING);
        ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier();
        gcpTracker.updateFromMaster(1L, new HashSet<>(Collections.singletonList(primary.allocationId().getId())), new IndexShardRoutingTable.Builder(shardId).addShard(primary).build());
        gcpTracker.activatePrimaryMode(primarySeqNo);
        if (defaultSettings.isSoftDeleteEnabled()) {
            final CountDownLatch countDownLatch = new CountDownLatch(1);
            gcpTracker.addPeerRecoveryRetentionLease(initializingReplica.currentNodeId(), SequenceNumbers.NO_OPS_PERFORMED, ActionListener.wrap(countDownLatch::countDown));
            countDownLatch.await();
        }
        gcpTracker.updateFromMaster(2L, new HashSet<>(Collections.singletonList(primary.allocationId().getId())), new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(initializingReplica).build());
        gcpTracker.initiateTracking(initializingReplica.allocationId().getId());
        gcpTracker.markAllocationIdAsInSync(initializingReplica.allocationId().getId(), replicaLocalCheckpoint);
        final ShardRouting replica = initializingReplica.moveToStarted();
        gcpTracker.updateFromMaster(3L, new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())), new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build());
        for (int op = 0; op < opCount; op++) {
            final String id;
            // mostly index, sometimes delete
            if (rarely() && indexedIds.isEmpty() == false) {
                // we have some docs indexed, so delete one of them
                id = randomFrom(indexedIds);
                final Engine.Delete delete = new Engine.Delete("test", id, newUid(id), UNASSIGNED_SEQ_NO, primaryTerm.get(), rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
                final Engine.DeleteResult result = initialEngine.delete(delete);
                if (result.getResultType() == Engine.Result.Type.SUCCESS) {
                    assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo + 1));
                    indexedIds.remove(id);
                    primarySeqNo++;
                } else {
                    assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo));
                }
            } else {
                // index a document
                id = randomFrom(ids);
                ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null);
                final Engine.Index index = new Engine.Index(newUid(doc), doc, UNASSIGNED_SEQ_NO, primaryTerm.get(), rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
                final Engine.IndexResult result = initialEngine.index(index);
                if (result.getResultType() == Engine.Result.Type.SUCCESS) {
                    assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo + 1));
                    indexedIds.add(id);
                    primarySeqNo++;
                } else {
                    assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO));
                    assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo));
                }
            }
            // to advance persisted local checkpoint
            initialEngine.syncTranslog();
            if (randomInt(10) < 3) {
                // only update rarely as we do it every doc
                replicaLocalCheckpoint = randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo));
            }
            gcpTracker.updateLocalCheckpoint(primary.allocationId().getId(), initialEngine.getPersistedLocalCheckpoint());
            gcpTracker.updateLocalCheckpoint(replica.allocationId().getId(), replicaLocalCheckpoint);
            if (rarely()) {
                localCheckpoint = primarySeqNo;
                maxSeqNo = primarySeqNo;
                initialEngine.flush(true, true);
            }
        }
        logger.info("localcheckpoint {}, global {}", replicaLocalCheckpoint, primarySeqNo);
        globalCheckpoint = gcpTracker.getGlobalCheckpoint();
        assertEquals(primarySeqNo, initialEngine.getSeqNoStats(-1).getMaxSeqNo());
        assertEquals(primarySeqNo, initialEngine.getPersistedLocalCheckpoint());
        assertThat(globalCheckpoint, equalTo(replicaLocalCheckpoint));
        assertThat(Long.parseLong(initialEngine.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), equalTo(localCheckpoint));
        // to guarantee the global checkpoint is written to the translog checkpoint
        initialEngine.getTranslog().sync();
        assertThat(initialEngine.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(globalCheckpoint));
        assertThat(Long.parseLong(initialEngine.commitStats().getUserData().get(SequenceNumbers.MAX_SEQ_NO)), equalTo(maxSeqNo));
    } finally {
        IOUtils.close(initialEngine);
    }
    try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())) {
        recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
        assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo());
        assertThat(Long.parseLong(recoveringEngine.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), equalTo(primarySeqNo));
        assertThat(recoveringEngine.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(globalCheckpoint));
        // the sequence numbers we have assigned should be present in the commit
        assertThat(Long.parseLong(recoveringEngine.commitStats().getUserData().get(SequenceNumbers.MAX_SEQ_NO)), equalTo(primarySeqNo));
        assertThat(recoveringEngine.getProcessedLocalCheckpoint(), equalTo(primarySeqNo));
        assertThat(recoveringEngine.getPersistedLocalCheckpoint(), equalTo(primarySeqNo));
        assertThat(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo));
        assertThat(generateNewSeqNo(recoveringEngine), equalTo(primarySeqNo + 1));
    }
}
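The ShardRouting lifecycle the test drives through the ReplicationTracker can be reduced to a short sketch. The index and node names below are made up; the point is that moveToStarted() keeps the allocation id the tracker uses as its key:

// hedged sketch, independent of the engine test fixture above
ShardId shardId = new ShardId(new Index("test", UUIDs.randomBase64UUID()), 0);
ShardRouting primary = TestShardRouting.newShardRouting(shardId, "node1", true, ShardRoutingState.STARTED);
ShardRouting initializingReplica = TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.INITIALIZING);
ShardRouting startedReplica = initializingReplica.moveToStarted();
assert startedReplica.started();
// the allocation id survives the INITIALIZING -> STARTED transition, so checkpoints
// tracked under initializingReplica's allocation id still apply to startedReplica
assert startedReplica.allocationId().getId().equals(initializingReplica.allocationId().getId());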
Use of org.opensearch.cluster.routing.ShardRouting in project OpenSearch by opensearch-project.
The class NoOpEngineTests, method testNoopAfterRegularEngine.
public void testNoopAfterRegularEngine() throws IOException {
    int docs = randomIntBetween(1, 10);
    ReplicationTracker tracker = (ReplicationTracker) engine.config().getGlobalCheckpointSupplier();
    ShardRouting routing = TestShardRouting.newShardRouting("test", shardId.id(), "node", null, true, ShardRoutingState.STARTED, allocationId);
    IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
    tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table);
    tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
    for (int i = 0; i < docs; i++) {
        ParsedDocument doc = testParsedDocument("" + i, null, testDocumentWithTextField(), B_1, null);
        engine.index(indexForDoc(doc));
        tracker.updateLocalCheckpoint(allocationId.getId(), i);
    }
    engine.flush(true, true);
    long localCheckpoint = engine.getPersistedLocalCheckpoint();
    long maxSeqNo = engine.getSeqNoStats(100L).getMaxSeqNo();
    engine.close();
    final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker));
    assertThat(noOpEngine.getPersistedLocalCheckpoint(), equalTo(localCheckpoint));
    assertThat(noOpEngine.getSeqNoStats(100L).getMaxSeqNo(), equalTo(maxSeqNo));
    try (Engine.IndexCommitRef ref = noOpEngine.acquireLastIndexCommit(false)) {
        try (IndexReader reader = DirectoryReader.open(ref.getIndexCommit())) {
            assertThat(reader.numDocs(), equalTo(docs));
        }
    }
    noOpEngine.close();
}
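The checkpoint and max sequence number the no-op engine reports come from the last Lucene commit's user data, the same keys the commitStats() assertions in the previous example read. A hedged, plain-Lucene sketch of that lookup (indexPath is a placeholder for the shard's index directory):

// illustration only; SequenceNumbers.LOCAL_CHECKPOINT_KEY / MAX_SEQ_NO are the keys used above
try (Directory dir = FSDirectory.open(indexPath)) {
    List<IndexCommit> commits = DirectoryReader.listCommits(dir);
    Map<String, String> userData = commits.get(commits.size() - 1).getUserData();
    long committedLocalCheckpoint = Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
    long committedMaxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO));
}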