Use of org.opensearch.test.BackgroundIndexer in project OpenSearch by opensearch-project.
From the class RecoveryWhileUnderLoadIT, method testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest.
public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() throws Exception {
    logger.info("--> creating test index ...");
    int numberOfShards = numberOfShards();
    assertAcked(
        prepareCreate(
            "test",
            1,
            Settings.builder()
                .put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
                .put(SETTING_NUMBER_OF_REPLICAS, 1)
                .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)
        )
    );
    final int totalNumDocs = scaledRandomIntBetween(200, 10000);
    int waitFor = totalNumDocs / 10;
    int extraDocs = waitFor;
    try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type", client(), extraDocs)) {
        logger.info("--> waiting for {} docs to be indexed ...", waitFor);
        waitForDocs(waitFor, indexer);
        indexer.assertNoFailures();
        logger.info("--> {} docs indexed", waitFor);
        extraDocs = totalNumDocs / 10;
        waitFor += extraDocs;
        indexer.continueIndexing(extraDocs);
        logger.info("--> flushing the index ....");
        // now flush, just to make sure we have some data in the index, not just translog
        client().admin().indices().prepareFlush().execute().actionGet();
        logger.info("--> waiting for {} docs to be indexed ...", waitFor);
        waitForDocs(waitFor, indexer);
        indexer.assertNoFailures();
        logger.info("--> {} docs indexed", waitFor);
        extraDocs = totalNumDocs - waitFor;
        indexer.continueIndexing(extraDocs);
        logger.info("--> allow 4 nodes for index [test] ...");
        allowNodes("test", 4);
        logger.info("--> waiting for GREEN health status ...");
        assertNoTimeout(
            client().admin().cluster().prepareHealth()
                .setWaitForEvents(Priority.LANGUID)
                .setTimeout("5m")
                .setWaitForGreenStatus()
        );
        logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
        waitForDocs(totalNumDocs, indexer);
        indexer.assertNoFailures();
        logger.info("--> {} docs indexed", totalNumDocs);
        logger.info("--> marking and waiting for indexing threads to stop ...");
        indexer.stopAndAwaitStopped();
        logger.info("--> indexing threads stopped");
        logger.info("--> refreshing the index");
        refreshAndAssert();
        logger.info("--> verifying indexed content");
        iterateAssertCount(numberOfShards, 10, indexer.getIds());
    }
}
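Taken together, the calls above form the BackgroundIndexer lifecycle that the tests in this listing rely on: create the indexer with an initial document budget, wait until enough documents are visible, raise the budget with continueIndexing() while the cluster is being disturbed, then stop the writer threads before verification. The condensed sketch below only illustrates that lifecycle; the test name, index name and document counts are invented, and it assumes the usual OpenSearchIntegTestCase helpers (client(), waitForDocs(), refresh()) are in scope.

// minimal lifecycle sketch, not a test copied from the OpenSearch repository
public void testIndexingSurvivesClusterDisruption() throws Exception {
    createIndex("test");
    try (BackgroundIndexer indexer = new BackgroundIndexer("test", "_doc", client(), 100)) {
        waitForDocs(50, indexer);        // block until at least 50 documents have been indexed
        indexer.assertNoFailures();      // fail fast if a writer thread has already hit an error
        indexer.continueIndexing(100);   // raise the budget so indexing keeps running during the disruption
        // ... disturb the cluster here: relocate shards, add a block, restart a node, ...
        indexer.stopAndAwaitStopped();   // signal the writer threads and wait until they have finished
        refresh("test");
        // indexer.totalIndexedDocs() and indexer.getIds() can then be checked against the search results
    }
}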
Use of org.opensearch.test.BackgroundIndexer in project OpenSearch by opensearch-project.
From the class SimpleBlocksIT, method testAddBlockWhileIndexingDocuments.
public void testAddBlockWhileIndexingDocuments() throws Exception {
    final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createIndex(indexName);
    final APIBlock block = randomAddableBlock();
    int nbDocs = 0;
    try {
        try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), 1000)) {
            // any indexing failure observed after the block is added must be a
            // ClusterBlockException caused by that single block
            indexer.setFailureAssertion(t -> {
                Throwable cause = ExceptionsHelper.unwrapCause(t);
                assertThat(cause, instanceOf(ClusterBlockException.class));
                ClusterBlockException e = (ClusterBlockException) cause;
                assertThat(e.blocks(), hasSize(1));
                assertTrue(e.blocks().stream().allMatch(b -> b.id() == block.getBlock().id()));
            });
            waitForDocs(randomIntBetween(10, 50), indexer);
            assertAcked(client().admin().indices().prepareAddBlock(block, indexName));
            indexer.stopAndAwaitStopped();
            nbDocs += indexer.totalIndexedDocs();
        }
        assertIndexHasBlock(block, indexName);
    } finally {
        disableIndexBlock(indexName, block);
    }
    refresh(indexName);
    // only successfully indexed documents are counted, so the search total must match
    assertHitCount(
        client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(TRACK_TOTAL_HITS_ACCURATE).get(),
        nbDocs
    );
}
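assertIndexHasBlock() and disableIndexBlock() are helpers defined elsewhere in SimpleBlocksIT and are not reproduced in this listing. As an illustration only, a check equivalent to assertIndexHasBlock() could look up the block in the cluster state; the snippet below is a guess at the idea, not the helper's actual code.

// illustrative sketch of the kind of check assertIndexHasBlock() performs; the real helper may differ
static void assertIndexHasBlock(APIBlock block, String indexName) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    assertTrue(
        "expected block [" + block + "] to be present on index [" + indexName + "]",
        state.blocks().hasIndexBlock(indexName, block.getBlock())
    );
}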
Use of org.opensearch.test.BackgroundIndexer in project OpenSearch by opensearch-project.
From the class OpenSearchMockAPIBasedRepositoryIntegTestCase, method testRequestStats.
public void testRequestStats() throws Exception {
    final String repository = createRepository(randomName());
    final String index = "index-no-merges";
    createIndex(
        index,
        Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
            .build()
    );
    final long nbDocs = randomLongBetween(10_000L, 20_000L);
    try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) {
        waitForDocs(nbDocs, indexer);
    }
    flushAndRefresh(index);
    ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get();
    assertThat(forceMerge.getSuccessfulShards(), equalTo(1));
    assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs);
    final String snapshot = "snapshot";
    assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index));
    assertAcked(client().admin().indices().prepareDelete(index));
    assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true));
    ensureGreen(index);
    assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs);
    assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get());
    // aggregate the request counters reported by every node's instance of the repository
    final RepositoryStats repositoryStats = StreamSupport.stream(
        internalCluster().getInstances(RepositoriesService.class).spliterator(), false
    ).map(repositoriesService -> {
        try {
            return repositoriesService.repository(repository);
        } catch (RepositoryMissingException e) {
            return null;
        }
    }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get();
    Map<String, Long> sdkRequestCounts = repositoryStats.requestCounts;
    final Map<String, Long> mockCalls = getMockRequestCounts();
    String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", sdkRequestCounts, mockCalls);
    // the requests counted by the SDK must match what the mock HTTP handler actually received
    assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts);
}
Use of org.opensearch.test.BackgroundIndexer in project OpenSearch by opensearch-project.
From the class OpenSearchMockAPIBasedRepositoryIntegTestCase, method testSnapshotWithLargeSegmentFiles.
/**
 * Test the snapshot and restore of an index which has large segment files.
 */
public void testSnapshotWithLargeSegmentFiles() throws Exception {
    final String repository = createRepository(randomName());
    final String index = "index-no-merges";
    createIndex(
        index,
        Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
            .build()
    );
    final long nbDocs = randomLongBetween(10_000L, 20_000L);
    try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) {
        waitForDocs(nbDocs, indexer);
    }
    flushAndRefresh(index);
    ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get();
    assertThat(forceMerge.getSuccessfulShards(), equalTo(1));
    assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs);
    final String snapshot = "snapshot";
    assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index));
    assertAcked(client().admin().indices().prepareDelete(index));
    assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true));
    ensureGreen(index);
    assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs);
    assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get());
}
Use of org.opensearch.test.BackgroundIndexer in project OpenSearch by opensearch-project.
From the class IndexRecoveryIT, method testReplicaRecovery.
public void testReplicaRecovery() throws Exception {
    final String nodeA = internalCluster().startNode();
    createIndex(
        INDEX_NAME,
        Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT)
            .build()
    );
    ensureGreen(INDEX_NAME);
    final int numOfDocs = scaledRandomIntBetween(0, 200);
    try (BackgroundIndexer indexer = new BackgroundIndexer(INDEX_NAME, "_doc", client(), numOfDocs)) {
        waitForDocs(numOfDocs, indexer);
    }
    refresh(INDEX_NAME);
    assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numOfDocs);
    final boolean closedIndex = randomBoolean();
    if (closedIndex) {
        assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
        ensureGreen(INDEX_NAME);
    }
    // force a shard recovery from nodeA to nodeB
    final String nodeB = internalCluster().startNode();
    assertAcked(
        client().admin().indices().prepareUpdateSettings(INDEX_NAME)
            .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))
    );
    ensureGreen(INDEX_NAME);
    final RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
    // we should now have two total shards, one primary and one replica
    List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    assertThat(recoveryStates.size(), equalTo(2));
    List<RecoveryState> nodeAResponses = findRecoveriesForTargetNode(nodeA, recoveryStates);
    assertThat(nodeAResponses.size(), equalTo(1));
    List<RecoveryState> nodeBResponses = findRecoveriesForTargetNode(nodeB, recoveryStates);
    assertThat(nodeBResponses.size(), equalTo(1));
    // validate node A recovery
    final RecoveryState nodeARecoveryState = nodeAResponses.get(0);
    final RecoverySource expectedRecoverySource;
    if (closedIndex == false) {
        expectedRecoverySource = RecoverySource.EmptyStoreRecoverySource.INSTANCE;
    } else {
        expectedRecoverySource = RecoverySource.ExistingStoreRecoverySource.INSTANCE;
    }
    assertRecoveryState(nodeARecoveryState, 0, expectedRecoverySource, true, Stage.DONE, null, nodeA);
    validateIndexRecoveryState(nodeARecoveryState.getIndex());
    // validate node B recovery
    final RecoveryState nodeBRecoveryState = nodeBResponses.get(0);
    assertRecoveryState(nodeBRecoveryState, 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeA, nodeB);
    validateIndexRecoveryState(nodeBRecoveryState.getIndex());
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeA));
    if (closedIndex) {
        assertAcked(client().admin().indices().prepareOpen(INDEX_NAME));
    }
    assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numOfDocs);
}
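findRecoveriesForTargetNode() is a private helper of IndexRecoveryIT that is not shown above. A sketch of the filtering it performs follows; it assumes RecoveryState exposes its target node through getTargetNode(), and is offered as an illustration rather than the helper's actual implementation.

// illustrative sketch; the real helper in IndexRecoveryIT may be written differently
private static List<RecoveryState> findRecoveriesForTargetNode(String nodeName, List<RecoveryState> recoveryStates) {
    List<RecoveryState> nodeResponses = new ArrayList<>();
    for (RecoveryState recoveryState : recoveryStates) {
        // keep only the recoveries whose target shard copy is allocated to the requested node
        if (recoveryState.getTargetNode().getName().equals(nodeName)) {
            nodeResponses.add(recoveryState);
        }
    }
    return nodeResponses;
}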