Use of org.opensearch.index.IndexService in project OpenSearch by opensearch-project.
Class IndexShardIT, method testStressMaybeFlushOrRollTranslogGeneration:
public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception {
    createIndex("test");
    ensureGreen();
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService test = indicesService.indexService(resolveIndex("test"));
    final IndexShard shard = test.getShardOrNull(0);
    assertFalse(shard.shouldPeriodicallyFlush());
    final boolean flush = randomBoolean();
    final Settings settings;
    if (flush) {
        // size of the operation plus the overhead of one generation.
        settings = Settings.builder().put("index.translog.flush_threshold_size", "125b").build();
    } else {
        // size of the operation plus header and footer
        settings = Settings.builder().put("index.translog.generation_threshold_size", "117b").build();
    }
    client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get();
    client().prepareIndex("test")
        .setId("0")
        .setSource("{}", XContentType.JSON)
        .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE)
        .get();
    assertFalse(shard.shouldPeriodicallyFlush());
    final AtomicBoolean running = new AtomicBoolean(true);
    final int numThreads = randomIntBetween(2, 4);
    final Thread[] threads = new Thread[numThreads];
    final CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread(() -> {
            try {
                barrier.await();
            } catch (final InterruptedException | BrokenBarrierException e) {
                throw new RuntimeException(e);
            }
            while (running.get()) {
                shard.afterWriteOperation();
            }
        });
        threads[i].start();
    }
    barrier.await();
    final CheckedRunnable<Exception> check;
    if (flush) {
        final FlushStats initialStats = shard.flushStats();
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        check = () -> {
            assertFalse(shard.shouldPeriodicallyFlush());
            final FlushStats currentStats = shard.flushStats();
            String msg = String.format(
                Locale.ROOT,
                "flush stats: total=[%d vs %d], periodic=[%d vs %d]",
                initialStats.getTotal(),
                currentStats.getTotal(),
                initialStats.getPeriodic(),
                currentStats.getPeriodic()
            );
            assertThat(
                msg,
                currentStats.getPeriodic(),
                either(equalTo(initialStats.getPeriodic() + 1)).or(equalTo(initialStats.getPeriodic() + 2))
            );
            assertThat(
                msg,
                currentStats.getTotal(),
                either(equalTo(initialStats.getTotal() + 1)).or(equalTo(initialStats.getTotal() + 2))
            );
        };
    } else {
        final long generation = getTranslog(shard).currentFileGeneration();
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        check = () -> {
            assertFalse(shard.shouldRollTranslogGeneration());
            assertEquals(generation + 1, getTranslog(shard).currentFileGeneration());
        };
    }
    assertBusy(check);
    running.set(false);
    for (int i = 0; i < threads.length; i++) {
        threads[i].join();
    }
    check.run();
}
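The two raw setting keys used above also have typed constants on IndexSettings. A minimal sketch, assuming the constant names INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING and INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING and imports of org.opensearch.common.settings.Settings and org.opensearch.index.IndexSettings, of building the same Settings without string literals:

// Hedged sketch: equivalent Settings built from IndexSettings constants instead of raw keys
// (constant names assumed; the sizes mirror the thresholds chosen in the test above).
private static Settings thresholdSettings(boolean flush) {
    return flush
        ? Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "125b").build()
        : Settings.builder().put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), "117b").build();
}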
Use of org.opensearch.index.IndexService in project OpenSearch by opensearch-project.
Class IndexShardIT, method testDurableFlagHasEffect:
public void testDurableFlagHasEffect() throws Exception {
    createIndex("test");
    ensureGreen();
    client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService test = indicesService.indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    Translog translog = getTranslog(shard);
    Predicate<Translog> needsSync = (tlog) -> {
        // we can't use tlog.syncNeeded() here since it also takes the global checkpoint into account
        // we explicitly want to check here if our durability checks are taken into account so we only
        // check if we are synced up to the current write location
        Translog.Location lastWriteLocation = tlog.getLastWriteLocation();
        try {
            // the lastWriteLocation has an Integer.MAX_VALUE size so we have to create a new one
            return tlog.ensureSynced(new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0));
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    };
    setDurability(shard, Translog.Durability.REQUEST);
    assertFalse(needsSync.test(translog));
    setDurability(shard, Translog.Durability.ASYNC);
    client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get();
    assertTrue(needsSync.test(translog));
    setDurability(shard, Translog.Durability.REQUEST);
    client().prepareDelete("test", "1").get();
    assertFalse(needsSync.test(translog));
    setDurability(shard, Translog.Durability.ASYNC);
    client().prepareDelete("test", "2").get();
    assertTrue(translog.syncNeeded());
    setDurability(shard, Translog.Durability.REQUEST);
    assertNoFailures(
        client().prepareBulk()
            .add(client().prepareIndex("test").setId("3").setSource("{}", XContentType.JSON))
            .add(client().prepareDelete("test", "1"))
            .get()
    );
    assertFalse(needsSync.test(translog));
    setDurability(shard, Translog.Durability.ASYNC);
    assertNoFailures(
        client().prepareBulk()
            .add(client().prepareIndex("test").setId("4").setSource("{}", XContentType.JSON))
            .add(client().prepareDelete("test", "3"))
            .get()
    );
    setDurability(shard, Translog.Durability.REQUEST);
    assertTrue(needsSync.test(translog));
}
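The setDurability helper called above is not shown in this snippet. A minimal sketch, assuming it simply flips the index.translog.durability setting via the update-settings API and checks that the shard observed the change (the exact helper in IndexShardIT may differ):

// Hedged sketch of a setDurability helper (assumed behavior, not necessarily the project's exact code):
// update the index-level translog durability and verify the shard picked it up.
private void setDurability(IndexShard shard, Translog.Durability durability) {
    client().admin()
        .indices()
        .prepareUpdateSettings(shard.shardId().getIndexName())
        .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build())
        .get();
    assertEquals(durability, shard.getTranslogDurability());
}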
Use of org.opensearch.index.IndexService in project OpenSearch by opensearch-project.
Class SearchIdleIT, method runTestAutomaticRefresh:
private void runTestAutomaticRefresh(final IntToLongFunction count) throws InterruptedException {
    TimeValue randomTimeValue = randomFrom(random(), null, TimeValue.ZERO, TimeValue.timeValueMillis(randomIntBetween(0, 1000)));
    Settings.Builder builder = Settings.builder();
    if (randomTimeValue != null) {
        builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), randomTimeValue);
    }
    IndexService indexService = createIndex("test", builder.build());
    assertFalse(indexService.getIndexSettings().isExplicitRefresh());
    ensureGreen();
    AtomicInteger totalNumDocs = new AtomicInteger(Integer.MAX_VALUE);
    assertNoSearchHits(client().prepareSearch().get());
    int numDocs = scaledRandomIntBetween(25, 100);
    totalNumDocs.set(numDocs);
    CountDownLatch indexingDone = new CountDownLatch(numDocs);
    client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
    // one doc was indexed synchronously above, so count it as done
    indexingDone.countDown();
    IndexShard shard = indexService.getShard(0);
    boolean hasRefreshed = shard.scheduledRefresh();
    if (randomTimeValue == TimeValue.ZERO) {
        // with ZERO we are guaranteed to see the doc since we will wait for a refresh in the background
        assertFalse(hasRefreshed);
        assertTrue(shard.isSearchIdle());
    } else {
        if (randomTimeValue == null) {
            assertFalse(shard.isSearchIdle());
        }
        // a concurrent background refresh may win the race, in which case the doc might not be visible
        // until the background refresh is done.
        if (hasRefreshed == false) {
            ensureNoPendingScheduledRefresh(indexService.getThreadPool());
        }
    }
    CountDownLatch started = new CountDownLatch(1);
    Thread t = new Thread(() -> {
        started.countDown();
        do {
        } while (count.applyAsLong(totalNumDocs.get()) != totalNumDocs.get());
    });
    t.start();
    started.await();
    assertThat(count.applyAsLong(totalNumDocs.get()), equalTo(1L));
    for (int i = 1; i < numDocs; i++) {
        client().prepareIndex("test")
            .setId("" + i)
            .setSource("{\"foo\" : \"bar\"}", XContentType.JSON)
            .execute(new ActionListener<IndexResponse>() {
                @Override
                public void onResponse(IndexResponse indexResponse) {
                    indexingDone.countDown();
                }

                @Override
                public void onFailure(Exception e) {
                    indexingDone.countDown();
                    throw new AssertionError(e);
                }
            });
    }
    indexingDone.await();
    t.join();
}
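The ensureNoPendingScheduledRefresh helper used above is also defined outside this snippet. A sketch of one way to drain pending refreshes, assuming ThreadPool.Names.REFRESH is backed by a java.util.concurrent.ThreadPoolExecutor and that java.util.concurrent.Phaser is imported (the project's actual helper may differ):

// Hedged sketch: occupy every refresh worker with a blocking task; once all tasks
// rendezvous at the phaser, any refresh scheduled earlier must have completed.
private void ensureNoPendingScheduledRefresh(ThreadPool threadPool) {
    ThreadPoolExecutor refreshExecutor = (ThreadPoolExecutor) threadPool.executor(ThreadPool.Names.REFRESH);
    int maximumPoolSize = refreshExecutor.getMaximumPoolSize();
    Phaser barrier = new Phaser(maximumPoolSize + 1);
    for (int i = 0; i < maximumPoolSize; i++) {
        refreshExecutor.execute(barrier::arriveAndAwaitAdvance);
    }
    barrier.arriveAndAwaitAdvance();
}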
Use of org.opensearch.index.IndexService in project OpenSearch by opensearch-project.
Class SearchIdleIT, method testPendingRefreshWithIntervalChange:
public void testPendingRefreshWithIntervalChange() throws Exception {
    Settings.Builder builder = Settings.builder();
    builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO);
    IndexService indexService = createIndex("test", builder.build());
    assertFalse(indexService.getIndexSettings().isExplicitRefresh());
    ensureGreen();
    client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
    IndexShard shard = indexService.getShard(0);
    assertFalse(shard.scheduledRefresh());
    assertTrue(shard.isSearchIdle());
    CountDownLatch refreshLatch = new CountDownLatch(1);
    // async refresh on purpose to make sure it happens concurrently
    client().admin().indices().prepareRefresh().execute(ActionListener.wrap(refreshLatch::countDown));
    assertHitCount(client().prepareSearch().get(), 1);
    client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
    assertFalse(shard.scheduledRefresh());
    assertTrue(shard.hasRefreshPending());
    // now disable background refresh and make sure the refresh happens
    CountDownLatch updateSettingsLatch = new CountDownLatch(1);
    client().admin()
        .indices()
        .prepareUpdateSettings("test")
        .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1).build())
        .execute(ActionListener.wrap(updateSettingsLatch::countDown));
    assertHitCount(client().prepareSearch().get(), 2);
    // wait for both to ensure we don't have in-flight operations
    updateSettingsLatch.await();
    refreshLatch.await();
    assertFalse(shard.hasRefreshPending());
    // We need to ensure a `scheduledRefresh` triggered by the internal refresh setting update is executed before we index a new doc;
    // otherwise, it will compete to call `Engine#maybeRefresh` with the `scheduledRefresh` that we are going to verify.
    ensureNoPendingScheduledRefresh(indexService.getThreadPool());
    client().prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
    assertTrue(shard.scheduledRefresh());
    assertFalse(shard.hasRefreshPending());
    assertTrue(shard.isSearchIdle());
    assertHitCount(client().prepareSearch().get(), 3);
}
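For reference, the two settings this test drives can also be read back from the live IndexSettings of the test index. A small sketch, with getter names assumed from the IndexSettings API:

// Hedged sketch: inspecting the effective values after the update-settings call above.
IndexSettings liveSettings = indexService.getIndexSettings();
TimeValue refreshInterval = liveSettings.getRefreshInterval(); // -1 disables background refresh
TimeValue searchIdleAfter = liveSettings.getSearchIdleAfter(); // ZERO makes the shard search-idle immediately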
Use of org.opensearch.index.IndexService in project OpenSearch by opensearch-project.
Class UpdateMappingIntegrationIT, method assertConcreteMappingsOnAll:
/**
 * Waits until mappings for the provided fields exist on all nodes. Note, this waits for the currently
 * started shards and checks for concrete mappings.
 */
private void assertConcreteMappingsOnAll(final String index, final String... fieldNames) {
    Set<String> nodes = internalCluster().nodesInclude(index);
    assertThat(nodes, Matchers.not(Matchers.emptyIterable()));
    for (String node : nodes) {
        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
        IndexService indexService = indicesService.indexService(resolveIndex(index));
        assertThat("index service doesn't exist on " + node, indexService, notNullValue());
        MapperService mapperService = indexService.mapperService();
        for (String fieldName : fieldNames) {
            MappedFieldType fieldType = mapperService.fieldType(fieldName);
            assertNotNull("field " + fieldName + " doesn't exist on " + node, fieldType);
        }
    }
    assertMappingOnMaster(index, fieldNames);
}
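A hedged usage sketch of the helper above, with hypothetical field names: after a document introduces fields through dynamic mapping, the test can block until the concrete mappings are visible on every node that holds the index.

// Hypothetical usage (field names invented for illustration):
client().prepareIndex("test")
    .setId("1")
    .setSource("{\"field1\" : \"value\", \"field2\" : 42}", XContentType.JSON)
    .get();
assertConcreteMappingsOnAll("test", "field1", "field2");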