Use of org.opensearch.index.cache.IndexCache in project OpenSearch by opensearch-project.
From the class IndexService, method createShard:
public synchronized IndexShard createShard(
        final ShardRouting routing,
        final Consumer<ShardId> globalCheckpointSyncer,
        final RetentionLeaseSyncer retentionLeaseSyncer) throws IOException {
    Objects.requireNonNull(retentionLeaseSyncer);
    /*
     * TODO: we execute this in parallel but it's a synced method. Yet, we might
     * be able to serialize the execution via the cluster state in the future. for now we just
     * keep it synced.
     */
    if (closed.get()) {
        throw new IllegalStateException("Can't create shard " + routing.shardId() + ", closed");
    }
    final Settings indexSettings = this.indexSettings.getSettings();
    final ShardId shardId = routing.shardId();
    boolean success = false;
    Store store = null;
    IndexShard indexShard = null;
    ShardLock lock = null;
    try {
        lock = nodeEnv.shardLock(shardId, "starting shard", TimeUnit.SECONDS.toMillis(5));
        eventListener.beforeIndexShardCreated(shardId, indexSettings);
        ShardPath path;
        try {
            path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings.customDataPath());
        } catch (IllegalStateException ex) {
            logger.warn("{} failed to load shard path, trying to remove leftover", shardId);
            try {
                ShardPath.deleteLeftoverShardDirectory(logger, nodeEnv, lock, this.indexSettings);
                path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings.customDataPath());
            } catch (Exception inner) {
                ex.addSuppressed(inner);
                throw ex;
            }
        }
        if (path == null) {
            // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
            // that's being relocated/replicated we know how large it will become once it's done copying:
            // Count up how many shards are currently on each data path:
            Map<Path, Integer> dataPathToShardCount = new HashMap<>();
            for (IndexShard shard : this) {
                Path dataPath = shard.shardPath().getRootStatePath();
                Integer curCount = dataPathToShardCount.get(dataPath);
                if (curCount == null) {
                    curCount = 0;
                }
                dataPathToShardCount.put(dataPath, curCount + 1);
            }
            path = ShardPath.selectNewPathForShard(
                nodeEnv,
                shardId,
                this.indexSettings,
                routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE
                    ? getAvgShardSizeInBytes()
                    : routing.getExpectedShardSize(),
                dataPathToShardCount
            );
            logger.debug("{} creating using a new path [{}]", shardId, path);
        } else {
            logger.debug("{} creating using an existing path [{}]", shardId, path);
        }
        if (shards.containsKey(shardId.id())) {
            throw new IllegalStateException(shardId + " already exists");
        }
        logger.debug("creating shard_id {}", shardId);
        // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
        final Engine.Warmer engineWarmer = (reader) -> {
            IndexShard shard = getShardOrNull(shardId.getId());
            if (shard != null) {
                warmer.warm(reader, shard, IndexService.this.indexSettings);
            }
        };
        Directory directory = directoryFactory.newDirectory(this.indexSettings, path);
        store = new Store(
            shardId,
            this.indexSettings,
            directory,
            lock,
            new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))
        );
        eventListener.onStoreCreated(shardId);
        indexShard = new IndexShard(
            routing,
            this.indexSettings,
            path,
            store,
            indexSortSupplier,
            indexCache,
            mapperService,
            similarityService,
            engineFactory,
            engineConfigFactory,
            eventListener,
            readerWrapper,
            threadPool,
            bigArrays,
            engineWarmer,
            searchOperationListeners,
            indexingOperationListeners,
            () -> globalCheckpointSyncer.accept(shardId),
            retentionLeaseSyncer,
            circuitBreakerService
        );
        eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
        eventListener.afterIndexShardCreated(indexShard);
        shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
        success = true;
        return indexShard;
    } catch (ShardLockObtainFailedException e) {
        throw new IOException("failed to obtain in-memory shard lock", e);
    } finally {
        if (success == false) {
            if (lock != null) {
                IOUtils.closeWhileHandlingException(lock);
            }
            closeShard("initialization failed", shardId, indexShard, store, eventListener);
        }
    }
}
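The tally of shards per data path in the branch above uses a pre-Java-8 get/check/put idiom. A minimal equivalent sketch using Map.merge (countShardsPerPath is a hypothetical helper name; IndexShard and ShardPath are the types from the listing):

import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

// Hypothetical helper mirroring the loop in createShard: counts how many
// shards currently live on each data path, one increment per shard.
static Map<Path, Integer> countShardsPerPath(Iterable<IndexShard> shards) {
    Map<Path, Integer> counts = new HashMap<>();
    for (IndexShard shard : shards) {
        counts.merge(shard.shardPath().getRootStatePath(), 1, Integer::sum);
    }
    return counts;
}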
Use of org.opensearch.index.cache.IndexCache in project OpenSearch by opensearch-project.
From the class IndexShard, method newEngineConfig:
private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) {
    final Sort indexSort = indexSortSupplier.get();
    final Engine.Warmer warmer = reader -> {
        assert Thread.holdsLock(mutex) == false : "warming engine under mutex";
        assert reader != null;
        if (this.warmer != null) {
            this.warmer.warm(reader);
        }
    };
    return this.engineConfigFactory.newEngineConfig(
        shardId, threadPool, indexSettings, warmer, store,
        indexSettings.getMergePolicy(),
        mapperService != null ? mapperService.indexAnalyzer() : null,
        similarityService.similarity(mapperService),
        engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService),
        shardEventListener,
        indexCache != null ? indexCache.query() : null,
        cachingPolicy, translogConfig,
        IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
        Arrays.asList(refreshListeners, refreshPendingLocationListener),
        Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
        indexSort, circuitBreakerService, globalCheckpointSupplier,
        replicationTracker::getRetentionLeases,
        () -> getOperationPrimaryTerm(),
        tombstoneDocSupplier()
    );
}
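Note the two null guards in the call above: mapperService and indexCache may be absent (some shards carry no mappings, and tests can run without a cache), so the engine config receives null for the index analyzer and query cache in those cases. A minimal generic sketch of the same null-safe pattern (mapOrNull is a hypothetical helper, not OpenSearch API):

import java.util.Optional;
import java.util.function.Function;

// Hypothetical helper: apply an accessor only when the source object is non-null.
static <T, R> R mapOrNull(T source, Function<T, R> accessor) {
    return Optional.ofNullable(source).map(accessor).orElse(null);
}

// Equivalent to the two guarded arguments above:
//   mapOrNull(mapperService, MapperService::indexAnalyzer)
//   mapOrNull(indexCache, IndexCache::query)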
Use of org.opensearch.index.cache.IndexCache in project OpenSearch by opensearch-project.
From the class IndexShardTestCase, method newShard:
/**
 * creates a new initializing shard.
 * @param routing                shard routing to use
 * @param shardPath              path to use for shard data
 * @param indexMetadata          indexMetadata for the shard, including any mapping
 * @param storeProvider          an optional custom store provider to use. If null a default file based store will be created
 * @param indexReaderWrapper     an optional wrapper to be used during search
 * @param globalCheckpointSyncer callback for syncing global checkpoints
 * @param indexEventListener     index event listener
 * @param listeners              an optional set of listeners to add to the shard
 */
protected IndexShard newShard(
        ShardRouting routing,
        ShardPath shardPath,
        IndexMetadata indexMetadata,
        @Nullable CheckedFunction<IndexSettings, Store, IOException> storeProvider,
        @Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
        @Nullable EngineFactory engineFactory,
        @Nullable EngineConfigFactory engineConfigFactory,
        Runnable globalCheckpointSyncer,
        RetentionLeaseSyncer retentionLeaseSyncer,
        IndexEventListener indexEventListener,
        IndexingOperationListener... listeners) throws IOException {
    final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
    final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings);
    final IndexShard indexShard;
    if (storeProvider == null) {
        storeProvider = is -> createStore(is, shardPath);
    }
    final Store store = storeProvider.apply(indexSettings);
    boolean success = false;
    try {
        IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
        MapperService mapperService = MapperTestUtils.newMapperService(
            xContentRegistry(),
            createTempDir(),
            indexSettings.getSettings(),
            "index"
        );
        mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY);
        SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap());
        final Engine.Warmer warmer = createTestWarmer(indexSettings);
        ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        CircuitBreakerService breakerService = new HierarchyCircuitBreakerService(nodeSettings, Collections.emptyList(), clusterSettings);
        indexShard = new IndexShard(
            routing,
            indexSettings,
            shardPath,
            store,
            () -> null,
            indexCache,
            mapperService,
            similarityService,
            engineFactory,
            engineConfigFactory,
            indexEventListener,
            indexReaderWrapper,
            threadPool,
            BigArrays.NON_RECYCLING_INSTANCE,
            warmer,
            Collections.emptyList(),
            Arrays.asList(listeners),
            globalCheckpointSyncer,
            retentionLeaseSyncer,
            breakerService
        );
        indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER);
        success = true;
    } finally {
        if (success == false) {
            IOUtils.close(store);
        }
    }
    return indexShard;
}
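Note how the test helper builds its IndexCache: the query cache is a DisabledQueryCache and the third constructor argument (the bitset filter cache) is null, so test shards never cache query results. A minimal sketch of that wiring in isolation (assuming the OpenSearch test utility IndexSettingsModule.newIndexSettings; treat the setup as illustrative):

// Sketch: an IndexCache with query caching disabled, as wired by newShard above.
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
IndexCache indexCache = new IndexCache(
    indexSettings,
    new DisabledQueryCache(indexSettings), // query cache that caches nothing
    null                                   // no bitset filter cache
);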
Use of org.opensearch.index.cache.IndexCache in project OpenSearch by opensearch-project.
From the class DefaultSearchContextTests, method testPreProcess:
public void testPreProcess() throws Exception {
    TimeValue timeout = new TimeValue(randomIntBetween(1, 100));
    ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class);
    when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT);
    ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1);
    when(shardSearchRequest.shardId()).thenReturn(shardId);
    ThreadPool threadPool = new TestThreadPool(this.getClass().getName());
    IndexShard indexShard = mock(IndexShard.class);
    QueryCachingPolicy queryCachingPolicy = mock(QueryCachingPolicy.class);
    when(indexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy);
    when(indexShard.getThreadPool()).thenReturn(threadPool);
    int maxResultWindow = randomIntBetween(50, 100);
    int maxRescoreWindow = randomIntBetween(50, 100);
    int maxSlicesPerScroll = randomIntBetween(50, 100);
    Settings settings = Settings.builder()
        .put("index.max_result_window", maxResultWindow)
        .put("index.max_slices_per_scroll", maxSlicesPerScroll)
        .put("index.max_rescore_window", maxRescoreWindow)
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
        .build();
    IndexService indexService = mock(IndexService.class);
    IndexCache indexCache = mock(IndexCache.class);
    QueryCache queryCache = mock(QueryCache.class);
    when(indexCache.query()).thenReturn(queryCache);
    when(indexService.cache()).thenReturn(indexCache);
    QueryShardContext queryShardContext = mock(QueryShardContext.class);
    when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class))).thenReturn(queryShardContext);
    MapperService mapperService = mock(MapperService.class);
    when(mapperService.hasNested()).thenReturn(randomBoolean());
    when(indexService.mapperService()).thenReturn(mapperService);
    IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build();
    IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY);
    when(indexService.getIndexSettings()).thenReturn(indexSettings);
    when(mapperService.getIndexSettings()).thenReturn(indexSettings);
    BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
    try (Directory dir = newDirectory();
         RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        final Supplier<Engine.SearcherSupplier> searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) {
            @Override
            protected void doClose() {
            }

            @Override
            protected Engine.Searcher acquireSearcherInternal(String source) {
                try {
                    IndexReader reader = w.getReader();
                    return new Engine.Searcher(
                        "test",
                        reader,
                        IndexSearcher.getDefaultSimilarity(),
                        IndexSearcher.getDefaultQueryCache(),
                        IndexSearcher.getDefaultQueryCachingPolicy(),
                        reader
                    );
                } catch (IOException exc) {
                    throw new AssertionError(exc);
                }
            }
        };
        SearchShardTarget target = new SearchShardTarget("node", shardId, null, OriginalIndices.NONE);

        ReaderContext readerWithoutScroll = new ReaderContext(
            newContextId(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false
        );
        DefaultSearchContext contextWithoutScroll = new DefaultSearchContext(
            readerWithoutScroll, shardSearchRequest, target, null, bigArrays, null, timeout, null, false, Version.CURRENT
        );
        contextWithoutScroll.from(300);
        contextWithoutScroll.close();

        // resultWindow greater than maxResultWindow and scrollContext is null
        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> contextWithoutScroll.preProcess(false));
        assertThat(
            exception.getMessage(),
            equalTo(
                "Result window is too large, from + size must be less than or equal to: ["
                    + maxResultWindow
                    + "] but was [310]. See the scroll api for a more efficient way to request large data sets. "
                    + "This limit can be set by changing the ["
                    + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey()
                    + "] index level setting."
            )
        );

        // resultWindow greater than maxResultWindow and scrollContext isn't null
        when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMillis(randomInt(1000))));
        ReaderContext readerContext = new LegacyReaderContext(
            newContextId(), indexService, indexShard, searcherSupplier.get(), shardSearchRequest, randomNonNegativeLong()
        );
        DefaultSearchContext context1 = new DefaultSearchContext(
            readerContext, shardSearchRequest, target, null, bigArrays, null, timeout, null, false, Version.CURRENT
        );
        context1.from(300);
        exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
        assertThat(
            exception.getMessage(),
            equalTo(
                "Batch size is too large, size must be less than or equal to: ["
                    + maxResultWindow
                    + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are "
                    + "controlled by the ["
                    + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey()
                    + "] index level setting."
            )
        );

        // resultWindow not greater than maxResultWindow and both rescore and sort are not null
        context1.from(0);
        DocValueFormat docValueFormat = mock(DocValueFormat.class);
        SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat });
        context1.sort(sortAndFormats);
        RescoreContext rescoreContext = mock(RescoreContext.class);
        when(rescoreContext.getWindowSize()).thenReturn(500);
        context1.addRescore(rescoreContext);
        exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
        assertThat(exception.getMessage(), equalTo("Cannot use [sort] option in conjunction with [rescore]."));

        // rescore is null but sort is not null and rescoreContext.getWindowSize() exceeds maxResultWindow
        context1.sort(null);
        exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
        assertThat(
            exception.getMessage(),
            equalTo(
                "Rescore window ["
                    + rescoreContext.getWindowSize()
                    + "] is too large. It must be less than ["
                    + maxRescoreWindow
                    + "]. This prevents allocating massive heaps for storing the results "
                    + "to be rescored. This limit can be set by changing the ["
                    + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
                    + "] index level setting."
            )
        );
        readerContext.close();
        readerContext = new ReaderContext(
            newContextId(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false
        );

        // rescore is null but sliceBuilder is not null
        DefaultSearchContext context2 = new DefaultSearchContext(
            readerContext, shardSearchRequest, target, null, bigArrays, null, timeout, null, false, Version.CURRENT
        );
        SliceBuilder sliceBuilder = mock(SliceBuilder.class);
        int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100);
        when(sliceBuilder.getMax()).thenReturn(numSlices);
        context2.sliceBuilder(sliceBuilder);
        exception = expectThrows(IllegalArgumentException.class, () -> context2.preProcess(false));
        assertThat(
            exception.getMessage(),
            equalTo(
                "The number of slices ["
                    + numSlices
                    + "] is too large. It must be less than ["
                    + maxSlicesPerScroll
                    + "]. This limit can be set by changing the ["
                    + IndexSettings.MAX_SLICES_PER_SCROLL.getKey()
                    + "] index level setting."
            )
        );

        // No exceptions should be thrown
        when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY);
        when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST);
        DefaultSearchContext context3 = new DefaultSearchContext(
            readerContext, shardSearchRequest, target, null, bigArrays, null, timeout, null, false, Version.CURRENT
        );
        ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery();
        context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false);
        assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query()));

        when(queryShardContext.getIndexSettings()).thenReturn(indexSettings);
        when(queryShardContext.fieldMapper(anyString())).thenReturn(mock(MappedFieldType.class));
        when(shardSearchRequest.indexRoutings()).thenReturn(new String[0]);
        readerContext.close();
        readerContext = new ReaderContext(
            newContextId(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false
        );
        DefaultSearchContext context4 = new DefaultSearchContext(
            readerContext, shardSearchRequest, target, null, bigArrays, null, timeout, null, false, Version.CURRENT
        );
        context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(false);
        Query query1 = context4.query();
        context4.sliceBuilder(new SliceBuilder(0, 2)).parsedQuery(parsedQuery).preProcess(false);
        Query query2 = context4.query();
        assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery);
        readerContext.close();
        threadPool.shutdown();
    }
}
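The test pins all three window limits to small random values (50 to 100) so each guard trips deterministically with from(300), a rescore window of 500, and an oversized slice count. For reference, a sketch of the same settings at OpenSearch's shipped defaults (the values are an assumption against the version at hand; check the IndexSettings setting definitions):

// Illustrative defaults for the three index-level limits exercised above.
Settings defaults = Settings.builder()
    .put("index.max_result_window", 10000)    // IndexSettings.MAX_RESULT_WINDOW_SETTING
    .put("index.max_rescore_window", 10000)   // IndexSettings.MAX_RESCORE_WINDOW_SETTING
    .put("index.max_slices_per_scroll", 1024) // IndexSettings.MAX_SLICES_PER_SCROLL
    .build();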