Use of org.opensearch.indices.fielddata.cache.IndicesFieldDataCache in project OpenSearch by opensearch-project.
From class RandomExceptionCircuitBreakerIT, method testBreakerWithRandomExceptions:
public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
    for (NodeStats node : client().admin()
        .cluster()
        .prepareNodesStats()
        .clear()
        .addMetric(BREAKER.metricName())
        .execute()
        .actionGet()
        .getNodes()) {
        assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
    }
    String mapping = Strings.toString(
        XContentFactory.jsonBuilder()
            .startObject().startObject("type").startObject("properties")
            .startObject("test-str").field("type", "keyword").field("doc_values", randomBoolean()).endObject()
            .startObject("test-num").field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer"))).endObject()
            .endObject().endObject().endObject()
    );
    final double topLevelRate;
    final double lowLevelRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                lowLevelRate = 1.0 / between(2, 10);
                topLevelRate = 0.0d;
            } else {
                topLevelRate = 1.0 / between(2, 10);
                lowLevelRate = 0.0d;
            }
        } else {
            lowLevelRate = 1.0 / between(2, 10);
            topLevelRate = 1.0 / between(2, 10);
        }
    } else {
        // rarely no exception
        topLevelRate = 0d;
        lowLevelRate = 0d;
    }
    Settings.Builder settings = Settings.builder()
        .put(indexSettings())
        .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
        .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
        .put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
    logger.info("creating index: [test] using settings: [{}]", settings.build());
    CreateIndexResponse response = client().admin()
        .indices()
        .prepareCreate("test")
        .setSettings(settings)
        .addMapping("type", mapping, XContentType.JSON)
        .execute()
        .actionGet();
    final int numDocs;
    if (response.isShardsAcknowledged() == false) {
        /* some seeds just won't let you create the index at all and we enter a ping-pong mode
         * trying one node after another etc. that is ok but we need to make sure we don't wait
         * forever when indexing documents so we set numDocs = 1 and expect all shards to fail
         * when we search below. */
        if (response.isAcknowledged()) {
            logger.info("Index creation timed out waiting for primaries to start - only index one doc and expect searches to fail");
        } else {
            logger.info("Index creation failed - only index one doc and expect searches to fail");
        }
        numDocs = 1;
    } else {
        numDocs = between(10, 100);
    }
    for (int i = 0; i < numDocs; i++) {
        try {
            client().prepareIndex("test")
                .setId("" + i)
                .setTimeout(TimeValue.timeValueSeconds(1))
                .setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i)
                .get();
        } catch (OpenSearchException ex) {
            // ignored: indexing is expected to fail occasionally because of the randomly injected exceptions
        }
    }
logger.info("Start Refresh");
// don't assert on failures here
RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
logger.info("Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
final int numSearches = scaledRandomIntBetween(50, 150);
NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().clear().addMetric(BREAKER.metricName()).execute().actionGet();
for (NodeStats stats : resp.getNodes()) {
assertThat("Breaker is set to 0", stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
}
    for (int i = 0; i < numSearches; i++) {
        SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
        if (random().nextBoolean()) {
            searchRequestBuilder.addSort("test-str", SortOrder.ASC);
        }
        searchRequestBuilder.addSort("test-num", SortOrder.ASC);
        boolean success = false;
        try {
            // Sort by the string and numeric fields, to load them into field data
            searchRequestBuilder.get();
            success = true;
        } catch (SearchPhaseExecutionException ex) {
            logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
        }
        if (frequently()) {
            // Now, clear the cache and check that the circuit breaker has been
            // successfully set back to zero. If there is a bug in the circuit
            // breaker adjustment code, it should show up here by the breaker
            // estimate being either positive or negative.
            // make sure all shards are there - there could be shards that are still starting up.
            ensureGreen("test");
            assertAllSuccessful(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).execute().actionGet());
            // Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually
            for (String node : internalCluster().getNodeNames()) {
                final IndicesFieldDataCache fdCache = internalCluster().getInstance(IndicesService.class, node)
                    .getIndicesFieldDataCache();
                // Clean up the cache, ensuring that entries' listeners have been called
                fdCache.getCache().refresh();
            }
            NodesStatsResponse nodeStats = client().admin()
                .cluster()
                .prepareNodesStats()
                .clear()
                .addMetric(BREAKER.metricName())
                .execute()
                .actionGet();
            for (NodeStats stats : nodeStats.getNodes()) {
                assertThat(
                    "Breaker reset to 0 last search success: " + success + " mapping: " + mapping,
                    stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(),
                    equalTo(0L)
                );
            }
        }
    }
}
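The manual fdCache.getCache().refresh() call above matters because IndicesFieldDataCache notifies per-entry listeners, and those listeners are what adjust the fielddata circuit breaker. Below is a minimal sketch (not OpenSearch code) of such an IndexFieldDataCache.Listener: the class name, the byte counter, and the currentBytes() accessor are illustrative assumptions, and the import packages can differ between OpenSearch versions.

// Illustrative sketch only: a listener that tracks the bytes currently held by
// the field data cache, similar in spirit to the breaker accounting exercised above.
import java.util.concurrent.atomic.AtomicLong;

import org.apache.lucene.util.Accountable;
import org.opensearch.index.fielddata.IndexFieldDataCache;
import org.opensearch.index.shard.ShardId; // package location may differ by OpenSearch version

public class TrackingFieldDataListener implements IndexFieldDataCache.Listener {

    private final AtomicLong bytes = new AtomicLong();

    @Override
    public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
        // invoked when field data or global ordinals for a field are loaded into the cache
        bytes.addAndGet(ramUsage.ramBytesUsed());
    }

    @Override
    public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
        // invoked when an entry is invalidated or evicted; Cache.refresh() forces pending
        // removals so these callbacks run before the breaker estimate is checked
        bytes.addAndGet(-sizeInBytes);
    }

    public long currentBytes() {
        return bytes.get();
    }
}

A cache constructed as new IndicesFieldDataCache(nodeSettings, new TrackingFieldDataListener()) would then be notified of every load and removal, which is the same callback path the test relies on when it asserts that the breaker estimate returns to zero.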
Use of org.opensearch.indices.fielddata.cache.IndicesFieldDataCache in project OpenSearch by opensearch-project.
From class IndexFieldDataServiceTests, method doTestRequireDocValues:
private void doTestRequireDocValues(MappedFieldType ft) {
    ThreadPool threadPool = new TestThreadPool("random_threadpool_name");
    try {
        IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null);
        IndexFieldDataService ifds = new IndexFieldDataService(
            IndexSettingsModule.newIndexSettings("test", Settings.EMPTY),
            cache,
            null,
            null
        );
        if (ft.hasDocValues()) {
            // no exception
            ifds.getForField(ft, "test", () -> {
                throw new UnsupportedOperationException();
            });
        } else {
            IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ifds.getForField(ft, "test", () -> {
                throw new UnsupportedOperationException();
            }));
            assertThat(e.getMessage(), containsString("doc values"));
        }
    } finally {
        threadPool.shutdown();
    }
}
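Individual tests in the class drive this helper with concrete MappedFieldType instances, one per field type. The calls below are a hedged sketch of what such invocations might look like; the test method names and the field-type constructor signatures (NumberFieldMapper.NumberFieldType, BooleanFieldMapper.BooleanFieldType) are assumptions that can vary between OpenSearch versions.

// Sketch of how doTestRequireDocValues(...) might be invoked from individual tests.
// Constructor signatures are assumptions, not verified against a specific version.
public void testRequireDocValuesOnLongs() {
    doTestRequireDocValues(new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.LONG));
}

public void testRequireDocValuesOnBools() {
    doTestRequireDocValues(new BooleanFieldMapper.BooleanFieldType("field"));
}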
Use of org.opensearch.indices.fielddata.cache.IndicesFieldDataCache in project OpenSearch by opensearch-project.
From class IndexShardTests, method testReaderWrapperWorksWithGlobalOrdinals:
public void testReaderWrapperWorksWithGlobalOrdinals() throws IOException {
    CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper = reader -> new FieldMaskingReader("foo", reader);
    Settings settings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    IndexMetadata metadata = IndexMetadata.builder("test")
        .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}")
        .settings(settings)
        .primaryTerm(0, 1)
        .build();
    IndexShard shard = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, wrapper);
    recoverShardFromStore(shard);
    indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}");
    shard.refresh("created segment 1");
    indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}");
    shard.refresh("created segment 2");
    // test global ordinals are evicted
    MappedFieldType foo = shard.mapperService().fieldType("foo");
    IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(
        shard.indexSettings.getNodeSettings(),
        new IndexFieldDataCache.Listener() {
        }
    );
    IndexFieldDataService indexFieldDataService = new IndexFieldDataService(
        shard.indexSettings,
        indicesFieldDataCache,
        new NoneCircuitBreakerService(),
        shard.mapperService()
    );
    IndexFieldData.Global ifd = indexFieldDataService.getForField(foo, "test", () -> {
        throw new UnsupportedOperationException("search lookup not available");
    });
    FieldDataStats before = shard.fieldData().stats("foo");
    assertThat(before.getMemorySizeInBytes(), equalTo(0L));
    FieldDataStats after = null;
    try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
        assertThat("we have to have more than one segment", searcher.getDirectoryReader().leaves().size(), greaterThan(1));
        ifd.loadGlobal(searcher.getDirectoryReader());
        after = shard.fieldData().stats("foo");
        assertEquals(after.getEvictions(), before.getEvictions());
        // If a field doesn't exist an empty IndexFieldData is returned and that isn't cached:
        assertThat(after.getMemorySizeInBytes(), equalTo(0L));
    }
    assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
    assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes());
    shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
    shard.refresh("test");
    assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes());
    assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
    closeShards(shard);
}
Use of org.opensearch.indices.fielddata.cache.IndicesFieldDataCache in project OpenSearch by opensearch-project.
From class IndicesService, method verifyIndexMetadata:
/**
* This method verifies that the given {@code metadata} holds sane values to create an {@link IndexService}.
* This method tries to update the meta data of the created {@link IndexService} if the given {@code metadataUpdate}
* is different from the given {@code metadata}.
* This method will throw an exception if the creation or the update fails.
* The created {@link IndexService} will not be registered and will be closed immediately.
*/
public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetadata metadataUpdate) throws IOException {
    final List<Closeable> closeables = new ArrayList<>();
    try {
        IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {
        });
        closeables.add(indicesFieldDataCache);
        IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings);
        closeables.add(indicesQueryCache);
        // this will also fail if some plugin fails etc. which is nice since we can verify that early
        final IndexService service = createIndexService(
            METADATA_VERIFICATION,
            metadata,
            indicesQueryCache,
            indicesFieldDataCache,
            emptyList()
        );
        closeables.add(() -> service.close("metadata verification", false));
        service.mapperService().merge(metadata, MapperService.MergeReason.MAPPING_RECOVERY);
        if (metadata.equals(metadataUpdate) == false) {
            service.updateMetadata(metadata, metadataUpdate);
        }
    } finally {
        IOUtils.close(closeables);
    }
}
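Because the temporary IndexService, its IndicesFieldDataCache, and the query cache are all closed in the finally block, a caller only observes either success or an exception. A hypothetical invocation might look like the following sketch; the variable names indicesService, currentIndexMetadata, and updatedIndexMetadata are illustrative assumptions, not identifiers from OpenSearch.

// Hypothetical usage: validate a proposed metadata update before applying it to the cluster state.
// indicesService, currentIndexMetadata and updatedIndexMetadata are assumed to be in scope.
try {
    indicesService.verifyIndexMetadata(currentIndexMetadata, updatedIndexMetadata);
} catch (Exception e) {
    // creating or updating the throw-away IndexService failed, so the update is rejected
    throw new IllegalStateException("index metadata verification failed", e);
}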
Use of org.opensearch.indices.fielddata.cache.IndicesFieldDataCache in project OpenSearch by opensearch-project.
From class InternalTestCluster, method ensureEstimatedStats:
@Override
public void ensureEstimatedStats() {
    if (size() > 0) {
        // check that the breakers have been reset without making a network request,
        // because a network request can itself increment one of the breakers
        for (NodeAndClient nodeAndClient : nodes.values()) {
            final IndicesFieldDataCache fdCache = getInstanceFromNode(IndicesService.class, nodeAndClient.node)
                .getIndicesFieldDataCache();
            // Clean up the cache, ensuring that entries' listeners have been called
            fdCache.getCache().refresh();
            final String name = nodeAndClient.name;
            final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node);
            CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA);
            assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L));
            // fail if it never reached 0
            try {
                assertBusy(() -> {
                    CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
                    assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L));
                });
            } catch (Exception e) {
                throw new AssertionError("Exception during check for request breaker reset to 0", e);
            }
            NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
            CommonStatsFlags flags = new CommonStatsFlags(Flag.FieldData, Flag.QueryCache, Flag.Segments);
            NodeStats stats = nodeService.stats(flags, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false);
            assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
            assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
            assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
        }
    }
}