Use of org.opensearch.indices.IndicesService in project OpenSearch by opensearch-project.
The class RareClusterStateIT, method testDelayedMappingPropagationOnReplica:
public void testDelayedMappingPropagationOnReplica() throws Exception {
// This is essentially the same thing as testDelayedMappingPropagationOnPrimary
// but for replicas
// Here we want to test that everything goes well if the mappings that
// are needed for a document are not available on the replica at the
// time of indexing it
final List<String> nodeNames = internalCluster().startNodes(2);
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
final String master = internalCluster().getMasterName();
assertThat(nodeNames, hasItem(master));
String otherNode = null;
for (String node : nodeNames) {
if (node.equals(master) == false) {
otherNode = node;
break;
}
}
assertNotNull(otherNode);
// Force allocation of the primary on the master node by first only allocating on the master
// and then allowing all nodes so that the replica gets allocated on the other node
prepareCreate("index").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).put("index.routing.allocation.include._name", master)).get();
client().admin().indices().prepareUpdateSettings("index").setSettings(Settings.builder().put("index.routing.allocation.include._name", "")).get();
ensureGreen();
// Check routing tables
ClusterState state = client().admin().cluster().prepareState().get().getState();
assertEquals(master, state.nodes().getMasterNode().getName());
List<ShardRouting> shards = state.routingTable().allShards("index");
assertThat(shards, hasSize(2));
for (ShardRouting shard : shards) {
if (shard.primary()) {
// primary must be on the master
assertEquals(state.nodes().getMasterNodeId(), shard.currentNodeId());
} else {
assertTrue(shard.active());
}
}
// Block cluster state processing on the replica
BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, random());
internalCluster().setDisruptionScheme(disruption);
disruption.startDisrupting();
final ActionFuture<AcknowledgedResponse> putMappingResponse = executeAndCancelCommittedPublication(client().admin().indices().preparePutMapping("index").setSource("field", "type=long"));
final Index index = resolveIndex("index");
// Wait for mappings to be available on master
assertBusy(() -> {
final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
final IndexService indexService = indicesService.indexServiceSafe(index);
assertNotNull(indexService);
final MapperService mapperService = indexService.mapperService();
DocumentMapper mapper = mapperService.documentMapper(MapperService.SINGLE_MAPPING_NAME);
assertNotNull(mapper);
assertNotNull(mapper.mappers().getMapper("field"));
});
final ActionFuture<IndexResponse> docIndexResponse = client().prepareIndex("index").setId("1").setSource("field", 42).execute();
assertBusy(() -> assertTrue(client().prepareGet("index", "1").get().isExists()));
// index another document, this time using dynamic mappings.
// The ack timeout of 0 on dynamic mapping updates makes it possible for the document to be indexed on the primary, even
// if the dynamic mapping update is not applied on the replica yet.
// The index request itself is not an explicit cluster state change; the cluster state update comes from the
// dynamic mapping update it triggers, so we need to await and cancel the committed publication.
ActionFuture<IndexResponse> dynamicMappingsFut = executeAndCancelCommittedPublication(client().prepareIndex("index").setId("2").setSource("field2", 42));
// ...and wait for second mapping to be available on master
assertBusy(() -> {
final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
final IndexService indexService = indicesService.indexServiceSafe(index);
assertNotNull(indexService);
final MapperService mapperService = indexService.mapperService();
DocumentMapper mapper = mapperService.documentMapper(MapperService.SINGLE_MAPPING_NAME);
assertNotNull(mapper);
assertNotNull(mapper.mappers().getMapper("field2"));
});
assertBusy(() -> assertTrue(client().prepareGet("index", "2").get().isExists()));
// The mappings have not been propagated to the replica yet; as a consequence the document cannot be indexed there.
// We wait on purpose to make sure that the document is not indexed because the shard operation is stalled,
// and not just because it takes time to replicate the indexing request to the replica.
Thread.sleep(100);
assertFalse(putMappingResponse.isDone());
assertFalse(docIndexResponse.isDone());
// Now make sure the indexing request finishes successfully
disruption.stopDisrupting();
assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged());
assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class));
// both shards should have succeeded
assertEquals(2, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal());
assertThat(dynamicMappingsFut.get(10, TimeUnit.SECONDS).getResult(), equalTo(CREATED));
}
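The IndicesService pattern at the heart of this test is the per-node mapping lookup inside assertBusy. Below is a minimal sketch that extracts it into a reusable helper; the helper name assertFieldMappingVisibleOnNode is hypothetical, and it assumes an OpenSearchIntegTestCase subclass with the same imports as the test above (note that Index lives in org.opensearch.index in 1.x but moved to org.opensearch.core.index in later versions).

// Hypothetical helper: wait until a node's in-memory MapperService can see a given field mapping.
private void assertFieldMappingVisibleOnNode(String nodeName, Index index, String fieldName) throws Exception {
    assertBusy(() -> {
        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
        IndexService indexService = indicesService.indexServiceSafe(index); // throws if the index is missing locally
        DocumentMapper mapper = indexService.mapperService().documentMapper(MapperService.SINGLE_MAPPING_NAME);
        assertNotNull(mapper);
        assertNotNull(mapper.mappers().getMapper(fieldName)); // null until the mapping update is applied
    });
}

With such a helper, the two assertBusy blocks in the test collapse to assertFieldMappingVisibleOnNode(master, index, "field") and assertFieldMappingVisibleOnNode(master, index, "field2").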
Use of org.opensearch.indices.IndicesService in project OpenSearch by opensearch-project.
The class ShardStateIT, method testPrimaryFailureIncreasesTerm:
public void testPrimaryFailureIncreasesTerm() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get();
ensureGreen();
assertPrimaryTerms(1, 1);
logger.info("--> disabling allocation to capture shard failure");
disableAllocation("test");
ClusterState state = client().admin().cluster().prepareState().get().getState();
final int shard = randomBoolean() ? 0 : 1;
final String nodeId = state.routingTable().index("test").shard(shard).primaryShard().currentNodeId();
final String node = state.nodes().get(nodeId).getName();
logger.info("--> failing primary of [{}] on node [{}]", shard, node);
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
indicesService.indexService(resolveIndex("test")).getShard(shard).failShard("simulated test failure", null);
logger.info("--> waiting for a yellow index");
// we can't use ensureYellow since that one is just as happy with a GREEN status.
assertBusy(() -> assertThat(client().admin().cluster().prepareHealth("test").get().getStatus(), equalTo(ClusterHealthStatus.YELLOW)));
final long term0 = shard == 0 ? 2 : 1;
final long term1 = shard == 1 ? 2 : 1;
assertPrimaryTerms(term0, term1);
logger.info("--> enabling allocation");
enableAllocation("test");
ensureGreen();
assertPrimaryTerms(term0, term1);
}
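The failure injection above reduces to a single IndicesService call chain: resolve which node hosts the primary, fetch that node's IndicesService instance, and fail the shard. A minimal sketch under the same assumptions as before; the helper name failPrimaryOf is hypothetical:

// Hypothetical helper: simulate a primary shard failure on whichever node currently hosts it.
private void failPrimaryOf(String indexName, int shardId) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    String nodeId = state.routingTable().index(indexName).shard(shardId).primaryShard().currentNodeId();
    String nodeName = state.nodes().get(nodeId).getName();
    IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
    indicesService.indexService(resolveIndex(indexName)).getShard(shardId).failShard("simulated test failure", null);
}

The second argument of failShard is the underlying exception; passing null, as the test does, simply records the reason string with no wrapped cause.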
Use of org.opensearch.indices.IndicesService in project OpenSearch by opensearch-project.
The class ShrinkIndexIT, method testShrinkCommitsMergeOnIdle:
public void testShrinkCommitsMergeOnIdle() throws Exception {
prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("index.number_of_replicas", 0).put("number_of_shards", 5)).get();
for (int i = 0; i < 30; i++) {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
client().admin().indices().prepareFlush("source").get();
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
// ensure all shards are allocated, otherwise the ensureGreen() below might not succeed, since we require the merge node:
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.routing.allocation.require._name", discoveryNodes[0].getName()).put("index.blocks.write", true)).get();
ensureGreen();
IndicesSegmentResponse sourceStats = client().admin().indices().prepareSegments("source").get();
// disable rebalancing to be able to capture the right stats. Balancing can move the target primary,
// making it hard to pinpoint the source shards.
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")).get();
// now merge source into a single shard index
assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(Settings.builder().put("index.number_of_replicas", 0).build()).get());
ensureGreen();
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
IndexMetadata target = clusterStateResponse.getState().getMetadata().index("target");
client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get();
IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get();
ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0];
assertTrue(segmentsStats.getNumberOfCommitted() > 0);
assertNotEquals(segmentsStats.getSegments().size(), segmentsStats.getNumberOfCommitted());
Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
for (IndicesService service : dataNodeInstances) {
if (service.hasIndex(target.getIndex())) {
IndexService indexShards = service.indexService(target.getIndex());
IndexShard shard = indexShards.getShard(0);
assertTrue(shard.isActive());
shard.flushOnIdle(0);
assertFalse(shard.isActive());
}
}
assertBusy(() -> {
IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get();
ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0];
Map<Integer, IndexShardSegments> source = sourceStats.getIndices().get("source").getShards();
int numSourceSegments = 0;
for (IndexShardSegments s : source.values()) {
numSourceSegments += s.getAt(0).getNumberOfCommitted();
}
assertTrue(targetShardSegments.getSegments().size() < numSourceSegments);
assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getNumberOfSearch());
assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getSegments().size());
assertEquals(1, targetShardSegments.getSegments().size());
});
// clean up
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null)).get();
}
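The shard-level part of this test, walking every data node's IndicesService to find local copies of the index and trigger the idle-flush path, generalizes to a small helper. A minimal sketch; the name flushOnIdleEverywhere is hypothetical:

// Hypothetical helper: trigger the idle flush on every locally allocated shard of the given index
// across all data nodes.
private void flushOnIdleEverywhere(Index index) {
    for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
        if (service.hasIndex(index)) {
            IndexService indexService = service.indexService(index);
            for (IndexShard shard : indexService) { // IndexService iterates its local shards
                shard.flushOnIdle(0); // 0 means any inactivity qualifies, so the shard is marked inactive at once
            }
        }
    }
}

The test relies on exactly this behavior: after flushOnIdle(0) the shard reports isActive() == false, which is what lets the later assertBusy observe a single committed segment.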
Use of org.opensearch.indices.IndicesService in project OpenSearch by opensearch-project.
The class IndexShardIT, method testMaybeFlush:
public void testMaybeFlush() throws Exception {
createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build());
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldPeriodicallyFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(135, /* size of the operation + one generation header&footer*/
ByteSizeUnit.BYTES)).build()).get();
client().prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldPeriodicallyFlush());
shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, new SourceToParse("test", "_doc", "1", new BytesArray("{}"), XContentType.JSON), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
assertTrue(shard.shouldPeriodicallyFlush());
final Translog translog = getTranslog(shard);
assertEquals(2, translog.stats().getUncommittedOperations());
assertThat(shard.flushStats().getTotal(), equalTo(0L));
client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(2L));
assertBusy(() -> {
// this is async
assertFalse(shard.shouldPeriodicallyFlush());
assertThat(shard.flushStats().getPeriodic(), equalTo(1L));
assertThat(shard.flushStats().getTotal(), equalTo(1L));
});
shard.sync();
assertThat(shard.getLastSyncedGlobalCheckpoint(), equalTo(2L));
assertThat("last commit [" + shard.commitStats().getUserData() + "]", translog.stats().getUncommittedOperations(), equalTo(0));
long size = Math.max(translog.stats().getUncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1);
logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)).build()).get();
client().prepareDelete("test", "2").get();
logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration());
assertBusy(() -> {
// this is async
final TranslogStats translogStats = translog.stats();
final CommitStats commitStats = shard.commitStats();
final FlushStats flushStats = shard.flushStats();
logger.info("--> translog stats [{}] gen [{}] commit_stats [{}] flush_stats [{}/{}]", Strings.toString(translogStats), translog.getGeneration().translogFileGeneration, commitStats.getUserData(), flushStats.getPeriodic(), flushStats.getTotal());
assertFalse(shard.shouldPeriodicallyFlush());
});
shard.sync();
assertEquals(0, translog.stats().getUncommittedOperations());
}
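In a single-node test like this one the IndicesService lookup is simpler: there is exactly one node, so getInstanceFromNode replaces the per-node internalCluster() lookup used in the integration tests above. A minimal sketch, assuming an OpenSearchSingleNodeTestCase subclass; the helper name localShard is hypothetical:

// Hypothetical helper for single-node tests: resolve shard 0 of a local index by name.
private IndexShard localShard(String indexName) {
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService indexService = indicesService.indexService(resolveIndex(indexName));
    return indexService.getShardOrNull(0); // returns null if the shard is not allocated on this node
}

This is exactly the lookup the test performs once at the top before driving the shard directly through applyIndexOperationOnPrimary and sync().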
Use of org.opensearch.indices.IndicesService in project OpenSearch by opensearch-project.
The class GlobalCheckpointSyncIT, method runGlobalCheckpointSyncTest:
private void runGlobalCheckpointSyncTest(final TimeValue globalCheckpointSyncInterval, final Consumer<Client> beforeIndexing, final Consumer<Client> afterIndexing) throws Exception {
final int numberOfReplicas = randomIntBetween(1, 4);
internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
prepareCreate("test", Settings.builder().put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), globalCheckpointSyncInterval).put("index.number_of_replicas", numberOfReplicas)).get();
if (randomBoolean()) {
ensureGreen();
}
beforeIndexing.accept(client());
final int numberOfDocuments = randomIntBetween(0, 256);
final int numberOfThreads = randomIntBetween(1, 4);
final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
// start concurrent indexing threads
final List<Thread> threads = new ArrayList<>(numberOfThreads);
for (int i = 0; i < numberOfThreads; i++) {
final int index = i;
final Thread thread = new Thread(() -> {
try {
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new RuntimeException(e);
}
for (int j = 0; j < numberOfDocuments; j++) {
final String id = Integer.toString(index * numberOfDocuments + j);
client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get();
}
try {
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new RuntimeException(e);
}
});
threads.add(thread);
thread.start();
}
// synchronize the start of the threads
barrier.await();
// wait for the threads to finish
barrier.await();
afterIndexing.accept(client());
assertBusy(() -> {
for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
for (IndexService indexService : indicesService) {
for (IndexShard shard : indexService) {
if (shard.routingEntry().primary()) {
final SeqNoStats seqNoStats = shard.seqNoStats();
assertThat("shard " + shard.routingEntry() + " seq_no [" + seqNoStats + "]", seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
}
}
}
}
}, 60, TimeUnit.SECONDS);
ensureGreen("test");
for (final Thread thread : threads) {
thread.join();
}
}
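The closing assertBusy shows the canonical full sweep: each IndicesService iterates its IndexService instances, and each IndexService iterates its IndexShard instances. A minimal sketch of that sweep as a standalone assertion; the helper name assertGlobalCheckpointsCaughtUp is hypothetical:

// Hypothetical helper: assert that every primary's global checkpoint has caught up
// to its maximum sequence number, across all data nodes.
private void assertGlobalCheckpointsCaughtUp() throws Exception {
    assertBusy(() -> {
        for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
            for (IndexService indexService : indicesService) { // all indices held on this node
                for (IndexShard shard : indexService) { // all local shard copies
                    if (shard.routingEntry().primary()) {
                        SeqNoStats seqNoStats = shard.seqNoStats();
                        assertEquals(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint());
                    }
                }
            }
        }
    }, 60, TimeUnit.SECONDS);
}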