Example usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse in the OpenSearch project (opensearch-project).
This example is the forceMerge method of the OpenSearchIntegTestCase class.
/**
 * Blocks until all shard relocations have settled, then force merges every index
 * in the cluster down to a single segment, failing the test if any shard reports
 * a merge failure.
 *
 * @return the cluster-wide {@link ForceMergeResponse}
 */
protected ForceMergeResponse forceMerge() {
    waitForRelocation();
    final ForceMergeResponse response = client().admin()
        .indices()
        .prepareForceMerge()
        .setMaxNumSegments(1)
        .get();
    assertNoFailures(response);
    return response;
}
Example usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse in the OpenSearch project (opensearch-project).
This example is the testFilterCacheStats method of the IndexStatsIT class.
/**
 * Verifies query (filter) cache statistics across the cache lifecycle: an empty cache on a
 * fresh index, misses then hits as the same constant-score query is repeated, eviction of
 * cached entries once fully-deleted segments are force-merged away, and zeroed size and
 * memory after an explicit clear-cache request.
 */
public void testFilterCacheStats() throws Exception {
// No replicas, and a fast retention-lease sync interval so global checkpoints persist quickly.
Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "200ms").build();
assertAcked(prepareCreate("index").setSettings(settings).get());
indexRandom(false, true, client().prepareIndex("index").setId("1").setSource("foo", "bar"), client().prepareIndex("index").setId("2").setSource("foo", "baz"));
// Need to persist the global checkpoint for the soft-deletes retention merge policy.
persistGlobalCheckpoint("index");
refresh();
ensureGreen();
// A fresh index must report an empty query cache.
IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeQueryCacheStats(response);
assertEquals(0, response.getTotal().queryCache.getCacheSize());
// The query cache has an optimization that disables it automatically if there is contention,
// so we run it in an assertBusy block which should eventually succeed.
assertBusy(() -> {
// First execution of the query: misses only, but entries become resident in the cache.
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeQueryCacheStats(stats);
assertThat(stats.getTotal().queryCache.getHitCount(), equalTo(0L));
assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L));
});
assertBusy(() -> {
// Repeating the identical query must now produce cache hits.
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeQueryCacheStats(stats);
assertThat(stats.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L));
});
assertEquals(DocWriteResponse.Result.DELETED, client().prepareDelete("index", "1").get().getResult());
assertEquals(DocWriteResponse.Result.DELETED, client().prepareDelete("index", "2").get().getResult());
// Here we are testing that a fully deleted segment should be dropped and its cache entries evicted.
// In order to instruct the merge policy not to keep a fully deleted segment,
// we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything.
persistGlobalCheckpoint("index");
// Wait until every retention lease has advanced past the highest sequence number,
// so the soft-deletes policy no longer needs to retain the deleted documents.
assertBusy(() -> {
for (final ShardStats shardStats : client().admin().indices().prepareStats("index").get().getIndex("index").getShards()) {
final long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo();
assertTrue(shardStats.getRetentionLeaseStats().retentionLeases().leases().stream().allMatch(retentionLease -> retentionLease.retainingSequenceNumber() == maxSeqNo + 1));
}
});
flush("index");
logger.info("--> force merging to a single segment");
ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge("index").setFlush(true).setMaxNumSegments(1).get();
assertAllSuccessful(forceMergeResponse);
logger.info("--> refreshing");
refresh();
// Merging away the fully deleted segments must evict their cache entries (size drops to 0
// while the cumulative eviction and cache counters stay positive).
logger.info("--> verifying that cache size is 0");
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L));
assertThat(response.getTotal().queryCache.getCacheCount(), greaterThan(0L));
// Re-index the documents and confirm the cache fills up again once the query is re-run.
indexRandom(true, client().prepareIndex("index").setId("1").setSource("foo", "bar"), client().prepareIndex("index").setId("2").setSource("foo", "baz"));
assertBusy(() -> {
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeQueryCacheStats(stats);
assertThat(stats.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getEvictions(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L));
assertThat(stats.getTotal().queryCache.getMemorySizeInBytes(), greaterThan(0L));
});
// An explicit clear-cache request must drop both entry count and memory footprint to zero,
// while the cumulative hit/miss/eviction counters are preserved.
assertAllSuccessful(client().admin().indices().prepareClearCache("index").setQueryCache(true).get());
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
assertCumulativeQueryCacheStats(response);
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L));
assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), equalTo(0L));
}
Example usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse in the OpenSearch project (opensearch-project).
This example is the testIndexActions method of the DocumentActionsIT class.
/**
 * End-to-end smoke test of the core document APIs against a single index: index, refresh,
 * index-exists, clear cache, force merge, get (by builder and by request, with and without
 * stored fields), delete, flush, and count-style searches. Each get/count phase is repeated
 * five times to guard against flaky, non-deterministic responses.
 */
public void testIndexActions() throws Exception {
createIndex();
NumShards numShards = getNumShards(getConcreteIndexName());
logger.info("Running Cluster Health");
ensureGreen();
// Index a single document and make it immediately searchable.
logger.info("Indexing [type1/1]");
IndexResponse indexResponse = client().prepareIndex().setIndex("test").setId("1").setSource(source("1", "test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
assertThat(indexResponse.getId(), equalTo("1"));
logger.info("Refreshing");
RefreshResponse refreshResponse = refresh();
assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
logger.info("--> index exists?");
assertThat(indexExists(getConcreteIndexName()), equalTo(true));
logger.info("--> index exists?, fake index");
assertThat(indexExists("test1234565"), equalTo(false));
// Clearing field-data and query caches must succeed on every shard copy.
logger.info("Clearing cache");
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").fieldDataCache(true).queryCache(true)).actionGet();
assertNoFailures(clearIndicesCacheResponse);
assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
// Force merge only after relocations settle, so every shard copy participates.
logger.info("Force Merging");
waitForRelocation(ClusterHealthStatus.GREEN);
ForceMergeResponse mergeResponse = forceMerge();
assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
GetResponse getResult;
// Fetch the document repeatedly via both the builder API and the request API;
// the source must round-trip unchanged each time.
logger.info("Get [type1/1]");
for (int i = 0; i < 5; i++) {
getResult = client().prepareGet("test", "1").execute().actionGet();
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
getResult = client().get(getRequest("test").id("1")).actionGet();
assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
}
// Requesting only stored fields must suppress _source in the response.
logger.info("Get [type1/1] with script");
for (int i = 0; i < 5; i++) {
getResult = client().prepareGet("test", "1").setStoredFields("name").execute().actionGet();
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
assertThat(getResult.isExists(), equalTo(true));
assertThat(getResult.getSourceAsBytes(), nullValue());
assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
}
// A get for a never-indexed id must consistently report non-existence.
logger.info("Get [type1/2] (should be empty)");
for (int i = 0; i < 5; i++) {
getResult = client().get(getRequest("test").id("2")).actionGet();
assertThat(getResult.isExists(), equalTo(false));
}
// Delete the document and verify it is gone after a refresh.
logger.info("Delete [type1/1]");
DeleteResponse deleteResponse = client().prepareDelete("test", "1").execute().actionGet();
assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
assertThat(deleteResponse.getId(), equalTo("1"));
logger.info("Refreshing");
client().admin().indices().refresh(refreshRequest("test")).actionGet();
logger.info("Get [type1/1] (should be empty)");
for (int i = 0; i < 5; i++) {
getResult = client().get(getRequest("test").id("1")).actionGet();
assertThat(getResult.isExists(), equalTo(false));
}
// Re-index two documents, flush, refresh, and verify both are retrievable.
logger.info("Index [type1/1]");
client().index(indexRequest("test").id("1").source(source("1", "test"))).actionGet();
logger.info("Index [type1/2]");
client().index(indexRequest("test").id("2").source(source("2", "test2"))).actionGet();
logger.info("Flushing");
FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet();
assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards));
assertThat(flushResult.getFailedShards(), equalTo(0));
logger.info("Refreshing");
client().admin().indices().refresh(refreshRequest("test")).actionGet();
logger.info("Get [type1/1] and [type1/2]");
for (int i = 0; i < 5; i++) {
getResult = client().get(getRequest("test").id("1")).actionGet();
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
getResult = client().get(getRequest("test").id("2")).actionGet();
String ste1 = getResult.getSourceAsString();
String ste2 = Strings.toString(source("2", "test2"));
assertThat("cycle #" + i, ste1, equalTo(ste2));
assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
}
logger.info("Count");
// Count both documents via size-0 searches, with and without an explicit query.
for (int i = 0; i < 5; i++) {
// test successful
SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(matchAllQuery()).execute().actionGet();
assertNoFailures(countResponse);
assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getFailedShards(), equalTo(0));
// count with no query is a match all one
countResponse = client().prepareSearch("test").setSize(0).execute().actionGet();
assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getFailedShards(), equalTo(0));
}
}
Example usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse in the OpenSearch project (opensearch-project).
This example is the testCacheWithFilteredAlias method of the IndicesRequestCacheIT class.
/**
 * Exercises the shard request cache when the same index is searched both directly and
 * through a filtered alias: each access path gets its own cache entry, producing a miss
 * on first execution and a hit on repetition (cache-state deltas track hits then misses).
 */
public void testCacheWithFilteredAlias() {
    final Client client = client();
    final Settings settings = Settings.builder()
        .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
        .build();
    // Single-shard index with the request cache enabled and a "last_week" filtered alias.
    assertAcked(
        client.admin()
            .indices()
            .prepareCreate("index")
            .setMapping("created_at", "type=date")
            .setSettings(settings)
            .addAlias(new Alias("last_week").filter(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")))
            .get()
    );
    final ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC);
    client.prepareIndex("index")
        .setId("1")
        .setRouting("1")
        .setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now))
        .get();
    // Force merge the index to ensure there can be no background merges during the
    // subsequent searches that would invalidate the cache.
    final ForceMergeResponse mergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
    OpenSearchAssertions.assertAllSuccessful(mergeResponse);
    refresh();
    assertCacheState(client, "index", 0, 0);
    // First search against the concrete index: one cache miss.
    SearchResponse resp = client.prepareSearch("index")
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setSize(0)
        .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d"))
        .get();
    OpenSearchAssertions.assertAllSuccessful(resp);
    assertThat(resp.getHits().getTotalHits().value, equalTo(1L));
    assertCacheState(client, "index", 0, 1);
    // Identical search repeated: served from the cache.
    resp = client.prepareSearch("index")
        .setSearchType(SearchType.QUERY_THEN_FETCH)
        .setSize(0)
        .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d"))
        .get();
    OpenSearchAssertions.assertAllSuccessful(resp);
    assertThat(resp.getHits().getTotalHits().value, equalTo(1L));
    assertCacheState(client, "index", 1, 1);
    // Searching through the filtered alias is keyed separately: another miss.
    resp = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get();
    OpenSearchAssertions.assertAllSuccessful(resp);
    assertThat(resp.getHits().getTotalHits().value, equalTo(1L));
    assertCacheState(client, "index", 1, 2);
    // Repeating the alias search hits its own cached entry.
    resp = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get();
    OpenSearchAssertions.assertAllSuccessful(resp);
    assertThat(resp.getHits().getTotalHits().value, equalTo(1L));
    assertCacheState(client, "index", 2, 2);
}
Example usage of org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse in the OpenSearch project (opensearch-project).
This example is the testCanCache method of the IndicesRequestCacheIT class.
/**
 * Verifies which search requests are eligible for the shard request cache: size &gt; 0
 * without an explicit cache flag, DFS_QUERY_THEN_FETCH searches, and non-filter
 * aggregations containing "now" must NOT be cached; requests with an explicit
 * request-cache flag or a rewritable filter aggregation MUST be cached.
 * NOTE(review): cache-state deltas of 2 appear to reflect one entry per shard of the
 * two-shard index — confirm against assertCacheState.
 */
public void testCanCache() throws Exception {
Client client = client();
// Two primary shards, no replicas, request cache explicitly enabled.
Settings settings = Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build();
assertAcked(client.admin().indices().prepareCreate("index").setMapping("s", "type=date").setSettings(settings).get());
// Nine dated documents spread across routing values "1", "2" and "3".
indexRandom(true, client.prepareIndex("index").setId("1").setRouting("1").setSource("s", "2016-03-19"), client.prepareIndex("index").setId("2").setRouting("1").setSource("s", "2016-03-20"), client.prepareIndex("index").setId("3").setRouting("1").setSource("s", "2016-03-21"), client.prepareIndex("index").setId("4").setRouting("2").setSource("s", "2016-03-22"), client.prepareIndex("index").setId("5").setRouting("2").setSource("s", "2016-03-23"), client.prepareIndex("index").setId("6").setRouting("2").setSource("s", "2016-03-24"), client.prepareIndex("index").setId("7").setRouting("3").setSource("s", "2016-03-25"), client.prepareIndex("index").setId("8").setRouting("3").setSource("s", "2016-03-26"), client.prepareIndex("index").setId("9").setRouting("3").setSource("s", "2016-03-27"));
ensureSearchable("index");
assertCacheState(client, "index", 0, 0);
// Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get();
OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
refresh();
ensureSearchable("index");
assertCacheState(client, "index", 0, 0);
// If size > 0 we should not cache by default.
final SearchResponse r1 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")).get();
OpenSearchAssertions.assertAllSuccessful(r1);
assertThat(r1.getHits().getTotalHits().value, equalTo(7L));
assertCacheState(client, "index", 0, 0);
// If search type is DFS_QUERY_THEN_FETCH we should not cache.
final SearchResponse r2 = client.prepareSearch("index").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setSize(0).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")).get();
OpenSearchAssertions.assertAllSuccessful(r2);
assertThat(r2.getHits().getTotalHits().value, equalTo(7L));
assertCacheState(client, "index", 0, 0);
// If search type is DFS_QUERY_THEN_FETCH we should not cache even if
// the cache flag is explicitly set on the request.
final SearchResponse r3 = client.prepareSearch("index").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setSize(0).setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")).get();
OpenSearchAssertions.assertAllSuccessful(r3);
assertThat(r3.getHits().getTotalHits().value, equalTo(7L));
assertCacheState(client, "index", 0, 0);
// If the request has a non-filter aggregation containing "now" we should not cache.
final SearchResponse r5 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")).addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")).get();
OpenSearchAssertions.assertAllSuccessful(r5);
assertThat(r5.getHits().getTotalHits().value, equalTo(7L));
assertCacheState(client, "index", 0, 0);
// If size > 0 but the cache flag is explicitly set on the request we should cache.
final SearchResponse r6 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get();
OpenSearchAssertions.assertAllSuccessful(r6);
assertThat(r6.getHits().getTotalHits().value, equalTo(7L));
assertCacheState(client, "index", 0, 2);
// If the request has a filter aggregation containing "now" we should cache since it gets rewritten.
final SearchResponse r4 = client.prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")).addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))).get();
OpenSearchAssertions.assertAllSuccessful(r4);
assertThat(r4.getHits().getTotalHits().value, equalTo(7L));
assertCacheState(client, "index", 0, 4);
}
Aggregations