use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
The class IndexFieldDataServiceTests, method testFieldDataCacheListener: obtains the node's IndicesService to build an IndexFieldDataService and verifies that the fielddata cache listener's onCache and onRemoval callbacks fire with the expected ShardId.
public void testFieldDataCacheListener() throws Exception {
    final IndexService indexService = createIndex("test");
    final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    // copy the ifdService since we can set the listener only once.
    final IndexFieldDataService ifdService = new IndexFieldDataService(indexService.getIndexSettings(),
            indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());
    final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
    final MappedFieldType mapper1 = new TextFieldMapper.Builder("s").fielddata(true).build(ctx).fieldType();
    final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    Document doc = new Document();
    doc.add(new StringField("s", "thisisastring", Store.NO));
    writer.addDocument(doc);
    DirectoryReader open = DirectoryReader.open(writer);
    // only a wrapped reader carries a ShardId, so the listener must see null otherwise
    final boolean wrap = randomBoolean();
    final IndexReader reader = wrap ? ElasticsearchDirectoryReader.wrap(open, new ShardId("test", "_na_", 1)) : open;
    final AtomicInteger onCacheCalled = new AtomicInteger();
    final AtomicInteger onRemovalCalled = new AtomicInteger();
    ifdService.setListener(new IndexFieldDataCache.Listener() {

        @Override
        public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onCacheCalled.incrementAndGet();
        }

        @Override
        public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onRemovalCalled.incrementAndGet();
        }
    });
    IndexFieldData<?> ifd = ifdService.getForField(mapper1);
    LeafReaderContext leafReaderContext = reader.getContext().leaves().get(0);
    // loading field data for the segment populates the cache and fires onCache
    AtomicFieldData load = ifd.load(leafReaderContext);
    assertEquals(1, onCacheCalled.get());
    assertEquals(0, onRemovalCalled.get());
    // closing the reader releases the cached entry and fires onRemoval
    reader.close();
    load.close();
    writer.close();
    assertEquals(1, onCacheCalled.get());
    assertEquals(1, onRemovalCalled.get());
    ifdService.clear();
}
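The same listener hook can be used for simple bookkeeping outside of a test. Below is a minimal sketch of a listener that tracks net fielddata bytes, relying only on the onCache/onRemoval signatures exercised above; the class name and the use of Accountable.ramBytesUsed() as the cached size are illustrative assumptions, not code from the test.

import java.util.concurrent.atomic.AtomicLong;

import org.apache.lucene.util.Accountable;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.shard.ShardId;

/**
 * Hypothetical listener that keeps a running total of fielddata bytes held.
 * It depends only on the callback signatures shown in the test above.
 */
final class FieldDataMemoryAccountant implements IndexFieldDataCache.Listener {

    private final AtomicLong bytes = new AtomicLong();

    @Override
    public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
        // a per-segment entry was cached; add its reported memory footprint
        bytes.addAndGet(ramUsage.ramBytesUsed());
    }

    @Override
    public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
        // the entry was released (e.g. the reader closed) or evicted; give the memory back
        bytes.addAndGet(-sizeInBytes);
    }

    long currentBytes() {
        return bytes.get();
    }
}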
use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
The class GlobalCheckpointSyncActionTests, method testTranslogSyncAfterGlobalCheckpointSync: mocks the IndicesService/IndexService/IndexShard chain and verifies that the global checkpoint sync action fsyncs the translog on both the primary and the replica path.
public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception {
    // wire up mocks so the action can resolve index -> shard -> translog
    final IndicesService indicesService = mock(IndicesService.class);
    final Index index = new Index("index", "uuid");
    final IndexService indexService = mock(IndexService.class);
    when(indicesService.indexServiceSafe(index)).thenReturn(indexService);
    final int id = randomIntBetween(0, 4);
    final IndexShard indexShard = mock(IndexShard.class);
    when(indexService.getShard(id)).thenReturn(indexShard);
    final Translog translog = mock(Translog.class);
    when(indexShard.getTranslog()).thenReturn(translog);
    final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction(Settings.EMPTY, transportService, clusterService,
            indicesService, threadPool, shardStateAction, new ActionFilters(Collections.emptySet()),
            new IndexNameExpressionResolver(Settings.EMPTY));
    final ShardId shardId = new ShardId(index, id);
    final GlobalCheckpointSyncAction.PrimaryRequest primaryRequest = new GlobalCheckpointSyncAction.PrimaryRequest(shardId);
    // both the primary and the replica path must fsync the translog
    if (randomBoolean()) {
        action.shardOperationOnPrimary(primaryRequest, indexShard);
    } else {
        action.shardOperationOnReplica(new GlobalCheckpointSyncAction.ReplicaRequest(primaryRequest, randomNonNegativeLong()), indexShard);
    }
    verify(translog).sync();
}
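Stripped of the Mockito wiring, the lookup chain the mocks stand in for runs from IndicesService down to the shard's translog. A rough sketch of that chain, under the assumption that the calls stubbed above (indexServiceSafe, getShard, getTranslog, sync) are the ones used; the helper class and method names are hypothetical and the real action does more than this:

import java.io.IOException;

import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;

final class TranslogSyncSketch {

    /** Hypothetical helper mirroring the call chain the mocks above simulate. */
    static void syncTranslog(IndicesService indicesService, ShardId shardId) throws IOException {
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); // throws if the index is missing
        IndexShard indexShard = indexService.getShard(shardId.id());                     // throws if the shard is missing
        indexShard.getTranslog().sync();                                                 // what the test checks via verify(translog).sync()
    }
}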
use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
The class IndexShardIT, method testStressMaybeFlush: lowers the translog flush threshold and hammers IndexShard#maybeFlush from several threads to check that exactly one additional flush is triggered once the threshold is crossed.
public void testStressMaybeFlush() throws Exception {
    createIndex("test");
    ensureGreen();
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService test = indicesService.indexService(resolveIndex("test"));
    final IndexShard shard = test.getShardOrNull(0);
    assertFalse(shard.shouldFlush());
    // lower the flush threshold so that a single additional operation crosses it
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                            new ByteSizeValue(117, /* size of the operation + header & footer */ ByteSizeUnit.BYTES))
                    .build())
            .get();
    client().prepareIndex("test", "test", "0").setSource("{}", XContentType.JSON)
            .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
    assertFalse(shard.shouldFlush());
    final AtomicBoolean running = new AtomicBoolean(true);
    final int numThreads = randomIntBetween(2, 4);
    Thread[] threads = new Thread[numThreads];
    CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                try {
                    barrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    throw new RuntimeException(e);
                }
                // hammer maybeFlush concurrently; only one flush may actually be triggered
                while (running.get()) {
                    shard.maybeFlush();
                }
            }
        };
        threads[i].start();
    }
    barrier.await();
    FlushStats flushStats = shard.flushStats();
    long total = flushStats.getTotal();
    // this operation pushes the translog over the threshold
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    assertBusy(() -> assertEquals(total + 1, shard.flushStats().getTotal()));
    running.set(false);
    for (int i = 0; i < threads.length; i++) {
        threads[i].join();
    }
    assertEquals(total + 1, shard.flushStats().getTotal());
}
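The concurrency scaffolding in this test is plain JDK. In isolation the pattern looks like the sketch below, where hammered would be shard::maybeFlush and trigger the index request that pushes the translog over the threshold; the class and method names are illustrative only.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;

/** Generic sketch of the barrier-plus-flag stress pattern used in testStressMaybeFlush. */
final class StressRunner {

    static void stress(int numThreads, Runnable hammered, Runnable trigger) throws Exception {
        AtomicBoolean running = new AtomicBoolean(true);
        CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); // +1 for the coordinating thread
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < numThreads; i++) {
            threads[i] = new Thread(() -> {
                try {
                    barrier.await(); // start all workers at the same moment
                } catch (InterruptedException | BrokenBarrierException e) {
                    throw new RuntimeException(e);
                }
                while (running.get()) {
                    hammered.run(); // e.g. shard.maybeFlush()
                }
            });
            threads[i].start();
        }
        barrier.await();   // release the workers
        trigger.run();     // e.g. the index request that crosses the flush threshold
        running.set(false);
        for (Thread thread : threads) {
            thread.join();
        }
    }
}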
use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
The class IndexShardIT, method testMarkAsInactiveTriggersSyncedFlush: marks the shard as idle via checkIdle(0) and waits for the resulting synced flush to appear as a sync commit id in the commit stats.
public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
    assertAcked(client().admin().indices().prepareCreate("test")
            .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
    client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get();
    ensureGreen("test");
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    // marking the shard as idle should trigger a synced flush
    indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
    assertBusy(() -> {
        IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
        // the synced flush shows up as a sync commit id in the commit user data
        assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
        indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
    });
    IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
    assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
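assertBusy here comes from ESTestCase and retries the assertion until it passes or a timeout elapses. A rough stand-alone equivalent, shown only to make the polling explicit (this is not the actual ESTestCase implementation, which handles backoff and timeouts more carefully):

import java.util.concurrent.TimeUnit;

/** Simplified sketch of the assertBusy(...) retry loop used in the tests above. */
final class Busy {

    static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        AssertionError last;
        do {
            try {
                assertion.run();
                return; // the assertion finally passed
            } catch (AssertionError e) {
                last = e;
                Thread.sleep(50); // back off briefly before retrying
            }
        } while (System.nanoTime() < deadline);
        throw last; // propagate the last failure once the timeout elapses
    }
}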
use of org.elasticsearch.indices.IndicesService in project elasticsearch by elastic.
The class IndexShardIT, method testMaybeFlush: drives the translog over a tiny flush threshold, first through the engine directly and then via a delete after raising the threshold to the current translog size, and checks that the asynchronous flush empties the translog each time.
public void testMaybeFlush() throws Exception {
    createIndex("test", Settings.builder()
            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build());
    ensureGreen();
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService test = indicesService.indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    assertFalse(shard.shouldFlush());
    // lower the flush threshold so that a single additional operation crosses it
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                            new ByteSizeValue(117, /* size of the operation + header & footer */ ByteSizeUnit.BYTES))
                    .build())
            .get();
    client().prepareIndex("test", "test", "0").setSource("{}", XContentType.JSON)
            .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
    assertFalse(shard.shouldFlush());
    // index directly through the engine to push the translog over the threshold
    ParsedDocument doc = testParsedDocument("1", "test", null, SequenceNumbersService.UNASSIGNED_SEQ_NO,
            new ParseContext.Document(), new BytesArray(new byte[] { 1 }), XContentType.JSON, null);
    Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
    shard.index(index);
    assertTrue(shard.shouldFlush());
    assertEquals(2, shard.getEngine().getTranslog().totalOperations());
    client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON)
            .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
    assertBusy(() -> {
        // the flush happens asynchronously
        assertFalse(shard.shouldFlush());
    });
    assertEquals(0, shard.getEngine().getTranslog().totalOperations());
    shard.getEngine().getTranslog().sync();
    long size = shard.getEngine().getTranslog().sizeInBytes();
    logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
            shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
    // raise the threshold to the current translog size so the next operation triggers another flush
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
                    .build())
            .get();
    client().prepareDelete("test", "test", "2").get();
    logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
            shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
    assertBusy(() -> {
        // the flush happens asynchronously
        logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
                shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
        assertFalse(shard.shouldFlush());
    });
    assertEquals(0, shard.getEngine().getTranslog().totalOperations());
}
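Both IndexShardIT examples rely on the same settings update to force flushes after tiny translog writes. Pulled out into a helper purely for readability (the class and method names are illustrative; the calls themselves are the ones shown above), that update is roughly:

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.IndexSettings;

/** Sketch of the flush-threshold update used in testMaybeFlush and testStressMaybeFlush. */
final class FlushThresholdSketch {

    static void setFlushThreshold(Client client, String index, long bytes) {
        client.admin().indices().prepareUpdateSettings(index)
                .setSettings(Settings.builder()
                        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                                new ByteSizeValue(bytes, ByteSizeUnit.BYTES))
                        .build())
                .get();
    }
}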