Usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest in the OpenSearch project (opensearch-project):
class IndicesRequestConverters, method forceMerge.
/**
 * Converts a {@link ForceMergeRequest} into a low-level REST {@link Request}
 * targeting the {@code _forcemerge} endpoint of the requested indices.
 * A {@code null} index list is treated as "all indices" (empty array).
 */
static Request forceMerge(ForceMergeRequest forceMergeRequest) {
    String[] targetIndices = forceMergeRequest.indices();
    if (targetIndices == null) {
        targetIndices = Strings.EMPTY_ARRAY;
    }
    Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(targetIndices, "_forcemerge"));
    RequestConverters.Params params = new RequestConverters.Params();
    params.withIndicesOptions(forceMergeRequest.indicesOptions());
    // Always serialize the three merge knobs so the server sees explicit values.
    params.putParam("max_num_segments", String.valueOf(forceMergeRequest.maxNumSegments()));
    params.putParam("only_expunge_deletes", String.valueOf(forceMergeRequest.onlyExpungeDeletes()));
    params.putParam("flush", String.valueOf(forceMergeRequest.flush()));
    request.addParameters(params.asMap());
    return request;
}
Usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest in the OpenSearch project (opensearch-project):
class RecoveryDuringReplicationTests, method testAddNewReplicas.
// Verifies that a replica added while concurrent indexing, deleting, flushing and
// force-merging are in flight ends up with the same docs/seq-nos as the primary.
public void testAddNewReplicas() throws Exception {
AtomicBoolean stopped = new AtomicBoolean();
List<Thread> threads = new ArrayList<>();
// Signals the writer threads to stop, then joins them all; wrapped in a Runnable
// so it can double as a Releasable for the try-with-resources below.
Runnable stopIndexing = () -> {
try {
stopped.set(true);
for (Thread thread : threads) {
thread.join();
}
} catch (Exception e) {
throw new AssertionError(e);
}
};
// The Releasable guarantees indexing is stopped even if the test body throws.
try (ReplicationGroup shards = createGroup(between(0, 1));
Releasable ignored = stopIndexing::run) {
shards.startAll();
// appendOnly: every doc gets a fresh (or auto-generated) id, no updates/deletes.
boolean appendOnly = randomBoolean();
AtomicInteger docId = new AtomicInteger();
int numThreads = between(1, 3);
for (int i = 0; i < numThreads; i++) {
Thread thread = new Thread(() -> {
while (stopped.get() == false) {
try {
int nextId = docId.incrementAndGet();
if (appendOnly) {
// null id exercises the auto-generated-id append-only path.
String id = randomBoolean() ? Integer.toString(nextId) : null;
shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
} else if (frequently()) {
// Mostly new docs, occasionally an update of an existing id.
String id = Integer.toString(frequently() ? nextId : between(0, nextId));
shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
} else {
// Occasionally delete a previously used id.
String id = Integer.toString(between(0, nextId));
shards.delete(new DeleteRequest(index.getName()).id(id));
}
// Randomly flush (~10%) and force-merge (~5%) on the primary to create
// segment churn while the replica recovers.
if (randomInt(100) < 10) {
shards.getPrimary().flush(new FlushRequest());
}
if (randomInt(100) < 5) {
shards.getPrimary().forceMerge(new ForceMergeRequest().flush(randomBoolean()).maxNumSegments(1));
}
} catch (Exception ex) {
throw new AssertionError(ex);
}
}
});
threads.add(thread);
thread.start();
}
// Wait until enough docs exist before starting recovery (flushes happen often).
assertBusy(() -> assertThat(docId.get(), greaterThanOrEqualTo(50)), 60, TimeUnit.SECONDS);
shards.getPrimary().sync();
// Add and recover a brand-new replica while writers are still running.
IndexShard newReplica = shards.addReplica();
shards.recoverReplica(newReplica);
// Let more indexing happen after recovery before stopping the writers.
assertBusy(() -> assertThat(docId.get(), greaterThanOrEqualTo(100)), 60, TimeUnit.SECONDS);
stopIndexing.run();
// Primary and the new replica must converge on identical doc-id/seq-no sets.
assertBusy(() -> assertThat(getDocIdAndSeqNos(newReplica), equalTo(getDocIdAndSeqNos(shards.getPrimary()))));
}
}
Usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest in the OpenSearch project (opensearch-project):
class IndexShardTests, method testDocStats.
// Verifies DocsStats (count/deleted/average size) across the document lifecycle:
// after initial indexing, after delete+reindex (tombstones), and after a force-merge
// that purges the deleted docs.
public void testDocStats() throws Exception {
IndexShard indexShard = null;
try {
// Retention of soft-deleted ops set to 0 so a force-merge can actually drop them.
indexShard = newStartedShard(false, Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0).build());
// at least two documents so we have docs to delete
final long numDocs = randomIntBetween(2, 32);
final long numDocsToDelete = randomLongBetween(1, numDocs);
for (int i = 0; i < numDocs; i++) {
final String id = Integer.toString(i);
indexDoc(indexShard, "_doc", id);
}
// Either path makes the docs visible to stats; exercised randomly.
if (randomBoolean()) {
indexShard.refresh("test");
} else {
indexShard.flush(new FlushRequest());
}
{
IndexShard shard = indexShard;
// Wait until the last searcher access is strictly in the past ...
assertBusy(() -> {
ThreadPool threadPool = shard.getThreadPool();
assertThat(threadPool.relativeTimeInMillis(), greaterThan(shard.getLastSearcherAccess()));
});
long prevAccessTime = shard.getLastSearcherAccess();
final DocsStats docsStats = indexShard.docStats();
// ... so we can assert docStats() itself did NOT bump the access time.
assertThat("searcher was marked as accessed", shard.getLastSearcherAccess(), equalTo(prevAccessTime));
assertThat(docsStats.getCount(), equalTo(numDocs));
try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
assertTrue(searcher.getIndexReader().numDocs() <= docsStats.getCount());
}
assertThat(docsStats.getDeleted(), equalTo(0L));
assertThat(docsStats.getAverageSizeInBytes(), greaterThan(0L));
}
// Delete a random subset of ids, then immediately reindex them: each pair leaves
// a deleted (tombstone) doc behind in the Lucene index.
final List<Integer> ids = randomSubsetOf(Math.toIntExact(numDocsToDelete), IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList()));
for (final Integer i : ids) {
final String id = Integer.toString(i);
deleteDoc(indexShard, "_doc", id);
indexDoc(indexShard, "_doc", id);
}
// Need to update and sync the global checkpoint and the retention leases for the soft-deletes retention MergePolicy.
final long newGlobalCheckpoint = indexShard.getLocalCheckpoint();
if (indexShard.routingEntry().primary()) {
indexShard.updateLocalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint());
indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint());
indexShard.syncRetentionLeases();
} else {
// On a replica, advance the checkpoint and bump every retention lease past it.
indexShard.updateGlobalCheckpointOnReplica(newGlobalCheckpoint, "test");
final RetentionLeases retentionLeases = indexShard.getRetentionLeases();
indexShard.updateRetentionLeasesOnReplica(new RetentionLeases(retentionLeases.primaryTerm(), retentionLeases.version() + 1, retentionLeases.leases().stream().map(lease -> new RetentionLease(lease.id(), newGlobalCheckpoint + 1, lease.timestamp(), ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE)).collect(Collectors.toList())));
}
indexShard.sync();
// flush the buffered deletes
final FlushRequest flushRequest = new FlushRequest();
flushRequest.force(false);
flushRequest.waitIfOngoing(false);
indexShard.flush(flushRequest);
if (randomBoolean()) {
indexShard.refresh("test");
}
{
// Live-doc count is unchanged by delete+reindex pairs.
final DocsStats docStats = indexShard.docStats();
try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
assertTrue(searcher.getIndexReader().numDocs() <= docStats.getCount());
}
assertThat(docStats.getCount(), equalTo(numDocs));
}
// merge them away
final ForceMergeRequest forceMergeRequest = new ForceMergeRequest();
forceMergeRequest.maxNumSegments(1);
indexShard.forceMerge(forceMergeRequest);
if (randomBoolean()) {
indexShard.refresh("test");
} else {
indexShard.flush(new FlushRequest());
}
{
// After merging to one segment the tombstones are gone: deleted == 0 again.
final DocsStats docStats = indexShard.docStats();
assertThat(docStats.getCount(), equalTo(numDocs));
assertThat(docStats.getDeleted(), equalTo(0L));
assertThat(docStats.getAverageSizeInBytes(), greaterThan(0L));
}
} finally {
closeShards(indexShard);
}
}
Usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest in the OpenSearch project (opensearch-project):
class RestForceMergeAction, method prepareRequest.
/**
 * Parses an incoming REST {@code _forcemerge} call into a {@link ForceMergeRequest},
 * logs a deprecation warning when {@code only_expunge_deletes} is combined with a
 * non-default {@code max_num_segments}, and dispatches the request via the node client.
 */
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
    final ForceMergeRequest mergeRequest = new ForceMergeRequest(indices);
    mergeRequest.indicesOptions(IndicesOptions.fromRequest(request, mergeRequest.indicesOptions()));
    // Each parameter falls back to the request's current (default) value when absent.
    mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments()));
    mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes()));
    mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush()));
    final boolean conflictingParams = mergeRequest.onlyExpungeDeletes()
        && mergeRequest.maxNumSegments() != ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS;
    if (conflictingParams) {
        deprecationLogger.deprecate(
            "force_merge_expunge_deletes_and_max_num_segments_deprecation",
            "setting only_expunge_deletes and max_num_segments at the same time is deprecated and will be rejected in a future version"
        );
    }
    return channel -> client.admin().indices().forceMerge(mergeRequest, new RestToXContentListener<>(channel));
}
Usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest in the OpenSearch project (opensearch-project):
class IndicesRequestConvertersTests, method testForceMerge.
/**
 * Checks that {@link IndicesRequestConverters#forceMerge} produces the expected
 * endpoint, query parameters, HTTP method, and an empty body for randomized inputs.
 */
public void testForceMerge() {
    String[] indices = OpenSearchTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
    // Exercise both construction paths: indices via constructor and via setter.
    final ForceMergeRequest forceMergeRequest;
    if (OpenSearchTestCase.randomBoolean()) {
        forceMergeRequest = new ForceMergeRequest(indices);
    } else {
        forceMergeRequest = new ForceMergeRequest();
        forceMergeRequest.indices(indices);
    }
    Map<String, String> expectedParams = new HashMap<>();
    RequestConvertersTests.setRandomIndicesOptions(forceMergeRequest::indicesOptions, forceMergeRequest::indicesOptions, expectedParams);
    // Randomly override each knob; the converter serializes them unconditionally,
    // so the expected param is recorded whether or not the default was kept.
    if (OpenSearchTestCase.randomBoolean()) {
        forceMergeRequest.maxNumSegments(OpenSearchTestCase.randomInt());
    }
    expectedParams.put("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments()));
    if (OpenSearchTestCase.randomBoolean()) {
        forceMergeRequest.onlyExpungeDeletes(OpenSearchTestCase.randomBoolean());
    }
    expectedParams.put("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes()));
    if (OpenSearchTestCase.randomBoolean()) {
        forceMergeRequest.flush(OpenSearchTestCase.randomBoolean());
    }
    expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush()));
    Request request = IndicesRequestConverters.forceMerge(forceMergeRequest);
    // "/idx1,idx2/_forcemerge" when indices were given, "/_forcemerge" otherwise.
    final String expectedEndpoint = (indices != null && indices.length > 0)
        ? "/" + String.join(",", indices) + "/_forcemerge"
        : "/_forcemerge";
    Assert.assertThat(request.getEndpoint(), equalTo(expectedEndpoint));
    Assert.assertThat(request.getParameters(), equalTo(expectedParams));
    Assert.assertThat(request.getEntity(), nullValue());
    Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}
Aggregations