Use of org.opensearch.index.shard.IndexShard in project OpenSearch by opensearch-project.
The class ShrinkIndexIT, method testShrinkCommitsMergeOnIdle:
public void testShrinkCommitsMergeOnIdle() throws Exception {
    prepareCreate("source").setSettings(
        Settings.builder().put(indexSettings()).put("index.number_of_replicas", 0).put("number_of_shards", 5)
    ).get();
    for (int i = 0; i < 30; i++) {
        client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    client().admin().indices().prepareFlush("source").get();
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    // ensure all shards are allocated, otherwise the ensureGreen() below might not succeed since we require the merge node;
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore
    // due to the require._name filter below
    ensureGreen();
    // relocate all shards to one node so that we can shrink the index
    client().admin().indices().prepareUpdateSettings("source")
        .setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", discoveryNodes[0].getName())
            .put("index.blocks.write", true))
        .get();
    ensureGreen();
    IndicesSegmentResponse sourceStats = client().admin().indices().prepareSegments("source").get();
    // disable rebalancing so that we capture the right stats; rebalancing can move the target primary,
    // making it hard to pinpoint the source shards
    client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"))
        .get();
    // now shrink the source index into a single-shard target index
    assertAcked(
        client().admin().indices().prepareResizeIndex("source", "target")
            .setSettings(Settings.builder().put("index.number_of_replicas", 0).build())
            .get()
    );
    ensureGreen();
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    IndexMetadata target = clusterStateResponse.getState().getMetadata().index("target");
    client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get();
    IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get();
    ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0];
    assertTrue(segmentsStats.getNumberOfCommitted() > 0);
    // the force merge was not flushed, so the live segment count must still differ from the committed count
    assertNotEquals(segmentsStats.getSegments().size(), segmentsStats.getNumberOfCommitted());
    Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
    for (IndicesService service : dataNodeInstances) {
        if (service.hasIndex(target.getIndex())) {
            IndexService indexService = service.indexService(target.getIndex());
            IndexShard shard = indexService.getShard(0);
            assertTrue(shard.isActive());
            // force the idle flush immediately; this commits the merged segments
            shard.flushOnIdle(0);
            assertFalse(shard.isActive());
        }
    }
    assertBusy(() -> {
        IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get();
        ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0];
        Map<Integer, IndexShardSegments> source = sourceStats.getIndices().get("source").getShards();
        int numSourceSegments = 0;
        for (IndexShardSegments s : source.values()) {
            numSourceSegments += s.getAt(0).getNumberOfCommitted();
        }
        assertTrue(targetShardSegments.getSegments().size() < numSourceSegments);
        assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getNumberOfSearch());
        assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getSegments().size());
        assertEquals(1, targetShardSegments.getSegments().size());
    });
    // clean up
    client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null))
        .get();
}
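The lookup-and-flush idiom above scans every data node's IndicesService. An equivalent way to reach a specific IndexShard, used verbatim in the RetentionLeaseIT example further down this page, is to resolve the shard's node through the routing table. A hedged sketch applying that idiom to this test's "target" index (same internal-cluster test scaffolding assumed):

    // Hedged sketch: resolve the primary IndexShard of shard 0 of "target" directly
    // via the routing table instead of iterating all IndicesService instances.
    String nodeId = clusterService().state().routingTable().index("target").shard(0).primaryShard().currentNodeId();
    String nodeName = clusterService().state().nodes().get(nodeId).getName();
    IndexShard primary = internalCluster().getInstance(IndicesService.class, nodeName)
        .getShardOrNull(new ShardId(resolveIndex("target"), 0));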
Use of org.opensearch.index.shard.IndexShard in project OpenSearch by opensearch-project.
The class GlobalCheckpointSyncIT, method runGlobalCheckpointSyncTest:
private void runGlobalCheckpointSyncTest(
    final TimeValue globalCheckpointSyncInterval,
    final Consumer<Client> beforeIndexing,
    final Consumer<Client> afterIndexing
) throws Exception {
    final int numberOfReplicas = randomIntBetween(1, 4);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    prepareCreate("test", Settings.builder()
        .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), globalCheckpointSyncInterval)
        .put("index.number_of_replicas", numberOfReplicas)).get();
    if (randomBoolean()) {
        ensureGreen();
    }
    beforeIndexing.accept(client());
    final int numberOfDocuments = randomIntBetween(0, 256);
    final int numberOfThreads = randomIntBetween(1, 4);
    final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    // start concurrent indexing threads
    final List<Thread> threads = new ArrayList<>(numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
        final int index = i;
        final Thread thread = new Thread(() -> {
            try {
                barrier.await();
            } catch (BrokenBarrierException | InterruptedException e) {
                throw new RuntimeException(e);
            }
            for (int j = 0; j < numberOfDocuments; j++) {
                final String id = Integer.toString(index * numberOfDocuments + j);
                client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get();
            }
            try {
                barrier.await();
            } catch (BrokenBarrierException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        });
        threads.add(thread);
        thread.start();
    }
    // synchronize the start of the threads
    barrier.await();
    // wait for the threads to finish indexing
    barrier.await();
    afterIndexing.accept(client());
    assertBusy(() -> {
        for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
            for (IndexService indexService : indicesService) {
                for (IndexShard shard : indexService) {
                    if (shard.routingEntry().primary()) {
                        final SeqNoStats seqNoStats = shard.seqNoStats();
                        assertThat(
                            "shard " + shard.routingEntry() + " seq_no [" + seqNoStats + "]",
                            seqNoStats.getGlobalCheckpoint(),
                            equalTo(seqNoStats.getMaxSeqNo())
                        );
                    }
                }
            }
        }
    }, 60, TimeUnit.SECONDS);
    ensureGreen("test");
    for (final Thread thread : threads) {
        thread.join();
    }
}
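The CyclicBarrier above has 1 + numberOfThreads parties: each worker awaits once before indexing and once after, while the test thread awaits twice, first to release all workers simultaneously and then to rendezvous with their completion. A minimal, self-contained sketch of the same coordination pattern (plain Java; the printed message stands in for the indexing work):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CyclicBarrier;

    public class BarrierCoordination {
        public static void main(String[] args) throws Exception {
            final int workers = 4;
            // one extra party for the coordinating (main) thread
            final CyclicBarrier barrier = new CyclicBarrier(1 + workers);
            final List<Thread> threads = new ArrayList<>(workers);
            for (int i = 0; i < workers; i++) {
                final int id = i;
                Thread t = new Thread(() -> {
                    try {
                        barrier.await(); // wait for the shared start signal
                        System.out.println("worker " + id + " doing work");
                        barrier.await(); // signal completion
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                });
                threads.add(t);
                t.start();
            }
            barrier.await(); // release all workers at once
            barrier.await(); // block until every worker has finished
            for (Thread t : threads) {
                t.join();
            }
        }
    }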
Use of org.opensearch.index.shard.IndexShard in project OpenSearch by opensearch-project.
The class GlobalCheckpointSyncIT, method testPersistGlobalCheckpoint:
public void testPersistGlobalCheckpoint() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    Settings.Builder indexSettings = Settings.builder()
        .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(100, 1000, "ms"))
        .put("index.number_of_replicas", randomIntBetween(0, 1));
    if (randomBoolean()) {
        indexSettings.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)
            .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(100, 1000, "ms"));
    }
    prepareCreate("test", indexSettings).get();
    if (randomBoolean()) {
        ensureGreen("test");
    }
    int numDocs = randomIntBetween(1, 20);
    for (int i = 0; i < numDocs; i++) {
        client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get();
    }
    ensureGreen("test");
    assertBusy(() -> {
        for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
            for (IndexService indexService : indicesService) {
                for (IndexShard shard : indexService) {
                    final SeqNoStats seqNoStats = shard.seqNoStats();
                    assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
                    assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
                    assertThat(shard.getLastSyncedGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
                }
            }
        }
    });
}
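The three assertions distinguish the local checkpoint, the in-memory global checkpoint (getLastKnownGlobalCheckpoint), and the global checkpoint last persisted with the translog (getLastSyncedGlobalCheckpoint); once the background sync has run, all three converge on the maximum sequence number. A hedged sketch extracting that check into a hypothetical helper (not part of the test class; assumes the same Hamcrest assertThat/equalTo imports as the test):

    // Hypothetical helper: assert that a shard's checkpoints have all caught up to max seq_no.
    private static void assertCheckpointsConverged(final IndexShard shard) {
        final SeqNoStats seqNoStats = shard.seqNoStats();
        final long maxSeqNo = seqNoStats.getMaxSeqNo();
        assertThat(seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo));
        // last global checkpoint computed on the primary (or received by a replica), held in memory
        assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(maxSeqNo));
        // last global checkpoint durably persisted with the translog
        assertThat(shard.getLastSyncedGlobalCheckpoint(), equalTo(maxSeqNo));
    }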
Use of org.opensearch.index.shard.IndexShard in project OpenSearch by opensearch-project.
The class GlobalCheckpointSyncIT, method testPersistLocalCheckpoint:
public void testPersistLocalCheckpoint() {
    internalCluster().ensureAtLeastNumDataNodes(2);
    Settings.Builder indexSettings = Settings.builder()
        .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "10m")
        .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", randomIntBetween(0, 1));
    prepareCreate("test", indexSettings).get();
    ensureGreen("test");
    int numDocs = randomIntBetween(1, 20);
    logger.info("numDocs {}", numDocs);
    long maxSeqNo = 0;
    for (int i = 0; i < numDocs; i++) {
        maxSeqNo = client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo();
        logger.info("got {}", maxSeqNo);
    }
    for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
        for (IndexService indexService : indicesService) {
            for (IndexShard shard : indexService) {
                final SeqNoStats seqNoStats = shard.seqNoStats();
                assertThat(maxSeqNo, equalTo(seqNoStats.getMaxSeqNo()));
                assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
            }
        }
    }
}
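Because the index uses Translog.Durability.REQUEST and a deliberately long (10m) global checkpoint sync interval, the local checkpoint is already persisted by the time each indexing request returns, so the test can assert directly without assertBusy. Sequence numbers on a single shard are assigned monotonically, which is why overwriting maxSeqNo with the latest response works here; a hedged variation that does not rely on that ordering:

    // Hedged sketch: track the maximum seq_no explicitly, starting from the "no ops" sentinel (-1).
    long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
    for (int i = 0; i < numDocs; i++) {
        maxSeqNo = Math.max(
            maxSeqNo,
            client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo()
        );
    }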
Use of org.opensearch.index.shard.IndexShard in project OpenSearch by opensearch-project.
The class RetentionLeaseIT, method testRetentionLeasesSyncOnExpiration:
public void testRetentionLeasesSyncOnExpiration() throws Exception {
    final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    final long estimatedTimeIntervalMillis = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(Settings.EMPTY).millis();
    final TimeValue retentionLeaseTimeToLive =
        TimeValue.timeValueMillis(randomLongBetween(estimatedTimeIntervalMillis, 2 * estimatedTimeIntervalMillis));
    final Settings settings = Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", numberOfReplicas)
        .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1))
        .build();
    createIndex("index", settings);
    ensureGreen("index");
    final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster().getInstance(IndicesService.class, primaryShardNodeName)
        .getShardOrNull(new ShardId(resolveIndex("index"), 0));
    // we will add multiple retention leases, wait for some to expire, and assert a consistent view between the primary and the replicas
    final int length = randomIntBetween(1, 8);
    for (int i = 0; i < length; i++) {
        // update the index so that retention leases live a long time
        final AcknowledgedResponse longTtlResponse = client().admin()
            .indices()
            .prepareUpdateSettings("index")
            .setSettings(Settings.builder().putNull(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey()).build())
            .get();
        assertTrue(longTtlResponse.isAcknowledged());
        final String id = randomAlphaOfLength(8);
        final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
        final String source = randomAlphaOfLength(8);
        final CountDownLatch latch = new CountDownLatch(1);
        final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch);
        final RetentionLease currentRetentionLease = primary.addRetentionLease(id, retainingSequenceNumber, source, listener);
        final long now = System.nanoTime();
        latch.await();
        // check that the current retention lease has been synced to all replicas
        for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) {
            final String replicaShardNodeId = replicaShard.currentNodeId();
            final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
            final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName)
                .getShardOrNull(new ShardId(resolveIndex("index"), 0));
            assertThat(
                RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.getRetentionLeases()).values(),
                anyOf(empty(), contains(currentRetentionLease))
            );
        }
        // update the index so that retention leases live a short time, to force expiration
        final AcknowledgedResponse shortTtlResponse = client().admin()
            .indices()
            .prepareUpdateSettings("index")
            .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), retentionLeaseTimeToLive).build())
            .get();
        assertTrue(shortTtlResponse.isAcknowledged());
        // sleep long enough that the current retention lease has expired
        final long later = System.nanoTime();
        Thread.sleep(Math.max(0, retentionLeaseTimeToLive.millis() - TimeUnit.NANOSECONDS.toMillis(later - now)));
        assertBusy(() -> assertThat(
            RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(primary.getRetentionLeases()).entrySet(),
            empty()
        ));
        // now that all retention leases have expired, the expiration should have been synced to all replicas
        assertBusy(() -> {
            for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) {
                final String replicaShardNodeId = replicaShard.currentNodeId();
                final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
                final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName)
                    .getShardOrNull(new ShardId(resolveIndex("index"), 0));
                assertThat(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.getRetentionLeases()).entrySet(), empty());
            }
        });
    }
}
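countDownLatchListener is a private helper of RetentionLeaseIT and is not shown on this page. Since addRetentionLease completes an ActionListener<ReplicationResponse> once the new lease has been synced to the replicas, a plausible sketch of such a helper looks like the following (an assumption, not the verbatim helper):

    // Hedged sketch of a latch-counting listener; the real helper in RetentionLeaseIT may differ.
    private static ActionListener<ReplicationResponse> countDownLatchListener(final CountDownLatch latch) {
        return new ActionListener<ReplicationResponse>() {
            @Override
            public void onResponse(final ReplicationResponse replicationResponse) {
                latch.countDown(); // the lease sync completed
            }

            @Override
            public void onFailure(final Exception e) {
                fail(e.toString()); // surface sync failures to the test
                latch.countDown();
            }
        };
    }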