Use of org.opensearch.action.admin.indices.flush.FlushRequest in project OpenSearch by opensearch-project.
From the class IndicesRequestConverters, the flush method:
static Request flush(FlushRequest flushRequest) {
    // A null indices array means "flush all indices"; the endpoint then collapses to /_flush
    String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices();
    Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush"));
    RequestConverters.Params parameters = new RequestConverters.Params();
    parameters.withIndicesOptions(flushRequest.indicesOptions());
    parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
    parameters.putParam("force", Boolean.toString(flushRequest.force()));
    request.addParameters(parameters.asMap());
    return request;
}
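The converter maps a FlushRequest straight onto the REST _flush endpoint: the index list becomes the path prefix, while force and wait_if_ongoing travel as query parameters. Below is a minimal usage sketch that drives this conversion through the high-level REST client, assuming its IndicesClient exposes flush(FlushRequest, RequestOptions); the index name "my-index" and the client variable are illustrative, not taken from the snippet above.

import java.io.IOException;

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.support.IndicesOptions;
import org.opensearch.client.RequestOptions;
import org.opensearch.client.RestHighLevelClient;

public class FlushExample {
    // Sketch only: flushes a single index through the high-level client, which converts
    // the request with the IndicesRequestConverters.flush(...) method shown above into
    // roughly POST /my-index/_flush?force=true&wait_if_ongoing=true plus indices options.
    static void flushIndex(RestHighLevelClient client) throws IOException {
        FlushRequest flushRequest = new FlushRequest("my-index")
            .force(true)           // serialized as the "force" query parameter
            .waitIfOngoing(true);  // serialized as "wait_if_ongoing"
        flushRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
        client.indices().flush(flushRequest, RequestOptions.DEFAULT);
    }
}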
Use of org.opensearch.action.admin.indices.flush.FlushRequest in project OpenSearch by opensearch-project.
From the class IndicesRequestConvertersTests, the testFlush method:
public void testFlush() {
    String[] indices = OpenSearchTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
    FlushRequest flushRequest;
    // Build the request either through the varargs constructor or the indices(...) setter
    if (OpenSearchTestCase.randomBoolean()) {
        flushRequest = new FlushRequest(indices);
    } else {
        flushRequest = new FlushRequest();
        flushRequest.indices(indices);
    }
    Map<String, String> expectedParams = new HashMap<>();
    RequestConvertersTests.setRandomIndicesOptions(flushRequest::indicesOptions, flushRequest::indicesOptions, expectedParams);
    if (OpenSearchTestCase.randomBoolean()) {
        flushRequest.force(OpenSearchTestCase.randomBoolean());
    }
    expectedParams.put("force", Boolean.toString(flushRequest.force()));
    if (OpenSearchTestCase.randomBoolean()) {
        flushRequest.waitIfOngoing(OpenSearchTestCase.randomBoolean());
    }
    expectedParams.put("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
    Request request = IndicesRequestConverters.flush(flushRequest);
    StringJoiner endpoint = new StringJoiner("/", "/", "");
    if (indices != null && indices.length > 0) {
        endpoint.add(String.join(",", indices));
    }
    endpoint.add("_flush");
    Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
    Assert.assertThat(request.getParameters(), equalTo(expectedParams));
    Assert.assertThat(request.getEntity(), nullValue());
    Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}
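The endpoint logic this test verifies is simple: with no indices the path collapses to /_flush, and with indices they are joined by commas ahead of _flush, always over HTTP POST with an empty body. A rough sketch of the two shapes, assuming the same package-private access to IndicesRequestConverters that the test class has (the index names are illustrative):

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.client.Request;

// Fragment assumed to live in the org.opensearch.client package, since
// IndicesRequestConverters is package-private there.
static void printFlushEndpoints() {
    Request all = IndicesRequestConverters.flush(new FlushRequest());
    System.out.println(all.getEndpoint());   // expected: /_flush
    Request some = IndicesRequestConverters.flush(new FlushRequest("logs-2024", "metrics"));
    System.out.println(some.getEndpoint());  // expected: /logs-2024,metrics/_flush
}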
Use of org.opensearch.action.admin.indices.flush.FlushRequest in project OpenSearch by opensearch-project.
From the class RecoveryDuringReplicationTests, the testRollbackOnPromotion method:
public void testRollbackOnPromotion() throws Exception {
    try (ReplicationGroup shards = createGroup(between(2, 3))) {
        shards.startAll();
        IndexShard newPrimary = randomFrom(shards.getReplicas());
        int initDocs = shards.indexDocs(randomInt(100));
        int inFlightOpsOnNewPrimary = 0;
        int inFlightOps = scaledRandomIntBetween(10, 200);
        for (int i = 0; i < inFlightOps; i++) {
            String id = "extra-" + i;
            IndexRequest primaryRequest = new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON);
            BulkShardRequest replicationRequest = indexOnPrimary(primaryRequest, shards.getPrimary());
            for (IndexShard replica : shards.getReplicas()) {
                if (randomBoolean()) {
                    indexOnReplica(replicationRequest, shards, replica);
                    if (replica == newPrimary) {
                        inFlightOpsOnNewPrimary++;
                    }
                }
            }
            if (randomBoolean()) {
                shards.syncGlobalCheckpoint();
            }
            if (rarely()) {
                shards.flush();
            }
        }
        shards.refresh("test");
        List<DocIdSeqNoAndSource> docsBelowGlobalCheckpoint = EngineTestCase.getDocIds(getEngine(newPrimary), randomBoolean())
            .stream()
            .filter(doc -> doc.getSeqNo() <= newPrimary.getLastKnownGlobalCheckpoint())
            .collect(Collectors.toList());
        CountDownLatch latch = new CountDownLatch(1);
        final AtomicBoolean done = new AtomicBoolean();
        // Background thread: operations at or below the global checkpoint must stay visible
        // on the remaining replicas while the new primary rolls back its engine.
        Thread thread = new Thread(() -> {
            List<IndexShard> replicas = new ArrayList<>(shards.getReplicas());
            replicas.remove(newPrimary);
            latch.countDown();
            while (done.get() == false) {
                try {
                    List<DocIdSeqNoAndSource> exposedDocs = EngineTestCase.getDocIds(getEngine(randomFrom(replicas)), randomBoolean());
                    assertThat(docsBelowGlobalCheckpoint, everyItem(is(in(exposedDocs))));
                    assertThat(randomFrom(replicas).getLocalCheckpoint(), greaterThanOrEqualTo(initDocs - 1L));
                } catch (AlreadyClosedException ignored) {
                    // replica swaps engine during rollback
                } catch (Exception e) {
                    throw new AssertionError(e);
                }
            }
        });
        thread.start();
        latch.await();
        shards.promoteReplicaToPrimary(newPrimary).get();
        shards.assertAllEqual(initDocs + inFlightOpsOnNewPrimary);
        int moreDocsAfterRollback = shards.indexDocs(scaledRandomIntBetween(1, 20));
        shards.assertAllEqual(initDocs + inFlightOpsOnNewPrimary + moreDocsAfterRollback);
        done.set(true);
        thread.join();
        shards.syncGlobalCheckpoint();
        for (IndexShard shard : shards) {
            // A forced, blocking flush should leave no uncommitted operations in the translog
            shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
            assertThat(shard.translogStats().getUncommittedOperations(), equalTo(0));
        }
    }
}
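At shard level, the force(true)/waitIfOngoing(true) combination at the end of this test is the usual "flush everything and block" idiom before asserting on translog state. A minimal helper sketch under the same test infrastructure; the helper name and the use of assertThat/equalTo via static imports are assumptions for illustration:

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.index.shard.IndexShard;

// Hypothetical helper: force a commit even when nothing new was indexed, wait for any
// flush already in flight, then expect the translog to report zero uncommitted operations.
static void flushAndAssertClean(IndexShard shard) {
    shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
    assertThat(shard.translogStats().getUncommittedOperations(), equalTo(0));
}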
Use of org.opensearch.action.admin.indices.flush.FlushRequest in project OpenSearch by opensearch-project.
From the class RecoveryDuringReplicationTests, the testAddNewReplicas method:
public void testAddNewReplicas() throws Exception {
    AtomicBoolean stopped = new AtomicBoolean();
    List<Thread> threads = new ArrayList<>();
    Runnable stopIndexing = () -> {
        try {
            stopped.set(true);
            for (Thread thread : threads) {
                thread.join();
            }
        } catch (Exception e) {
            throw new AssertionError(e);
        }
    };
    try (ReplicationGroup shards = createGroup(between(0, 1)); Releasable ignored = stopIndexing::run) {
        shards.startAll();
        boolean appendOnly = randomBoolean();
        AtomicInteger docId = new AtomicInteger();
        int numThreads = between(1, 3);
        for (int i = 0; i < numThreads; i++) {
            Thread thread = new Thread(() -> {
                while (stopped.get() == false) {
                    try {
                        int nextId = docId.incrementAndGet();
                        if (appendOnly) {
                            String id = randomBoolean() ? Integer.toString(nextId) : null;
                            shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
                        } else if (frequently()) {
                            String id = Integer.toString(frequently() ? nextId : between(0, nextId));
                            shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
                        } else {
                            String id = Integer.toString(between(0, nextId));
                            shards.delete(new DeleteRequest(index.getName()).id(id));
                        }
                        if (randomInt(100) < 10) {
                            shards.getPrimary().flush(new FlushRequest());
                        }
                        if (randomInt(100) < 5) {
                            shards.getPrimary().forceMerge(new ForceMergeRequest().flush(randomBoolean()).maxNumSegments(1));
                        }
                    } catch (Exception ex) {
                        throw new AssertionError(ex);
                    }
                }
            });
            threads.add(thread);
            thread.start();
        }
        // we flush quite often
        assertBusy(() -> assertThat(docId.get(), greaterThanOrEqualTo(50)), 60, TimeUnit.SECONDS);
        shards.getPrimary().sync();
        IndexShard newReplica = shards.addReplica();
        shards.recoverReplica(newReplica);
        // we flush quite often
        assertBusy(() -> assertThat(docId.get(), greaterThanOrEqualTo(100)), 60, TimeUnit.SECONDS);
        stopIndexing.run();
        assertBusy(() -> assertThat(getDocIdAndSeqNos(newReplica), equalTo(getDocIdAndSeqNos(shards.getPrimary()))));
    }
}
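The indexing loop only flushes the primary occasionally (roughly one in ten iterations), so the new replica's recovery has to cope with a mix of committed and uncommitted operations. A small sketch of that throttled-flush pattern; the helper name and the explicit Random instance are assumptions for illustration:

import java.util.Random;

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.index.shard.IndexShard;

// Roughly 10% of calls trigger a default (non-forced) flush on the primary shard,
// mirroring the randomInt(100) < 10 branch in the loop above.
static void maybeFlush(IndexShard primary, Random random) {
    if (random.nextInt(100) < 10) {
        primary.flush(new FlushRequest());
    }
}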
Use of org.opensearch.action.admin.indices.flush.FlushRequest in project OpenSearch by opensearch-project.
From the class IndexShardTests, the testReaderWrapperWorksWithGlobalOrdinals method:
public void testReaderWrapperWorksWithGlobalOrdinals() throws IOException {
    CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper = reader -> new FieldMaskingReader("foo", reader);
    Settings settings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    IndexMetadata metadata = IndexMetadata.builder("test")
        .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}")
        .settings(settings)
        .primaryTerm(0, 1)
        .build();
    IndexShard shard = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, wrapper);
    recoverShardFromStore(shard);
    indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
    shard.refresh("created segment 1");
    indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}");
    shard.refresh("created segment 2");
    // test global ordinals are evicted
    MappedFieldType foo = shard.mapperService().fieldType("foo");
    IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(
        shard.indexSettings.getNodeSettings(),
        new IndexFieldDataCache.Listener() {
        }
    );
    IndexFieldDataService indexFieldDataService = new IndexFieldDataService(
        shard.indexSettings,
        indicesFieldDataCache,
        new NoneCircuitBreakerService(),
        shard.mapperService()
    );
    IndexFieldData.Global ifd = indexFieldDataService.getForField(foo, "test", () -> {
        throw new UnsupportedOperationException("search lookup not available");
    });
    FieldDataStats before = shard.fieldData().stats("foo");
    assertThat(before.getMemorySizeInBytes(), equalTo(0L));
    FieldDataStats after = null;
    try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
        assertThat("we have to have more than one segment", searcher.getDirectoryReader().leaves().size(), greaterThan(1));
        ifd.loadGlobal(searcher.getDirectoryReader());
        after = shard.fieldData().stats("foo");
        assertEquals(after.getEvictions(), before.getEvictions());
        // If a field doesn't exist an empty IndexFieldData is returned and that isn't cached:
        assertThat(after.getMemorySizeInBytes(), equalTo(0L));
    }
    assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
    assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes());
    shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
    shard.refresh("test");
    assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes());
    assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
    closeShards(shard);
}
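Near the end, the forced, blocking flush is immediately followed by a refresh so the shard opens a fresh reader before fielddata statistics are checked again: force(true) requests a new commit even with no pending operations, and waitIfOngoing(true) blocks behind any flush that is already running instead of skipping. A compact helper sketch with assumed names:

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.index.shard.IndexShard;

// Hypothetical wrapper: commit even with no pending operations (force), wait for any
// in-flight flush (waitIfOngoing), then refresh so later reads see the new commit point.
static void flushAndRefresh(IndexShard shard, String reason) {
    shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
    shard.refresh(reason);
}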