Use of org.apache.lucene.util.Accountable in project elasticsearch by elastic.
From the class BitSetFilterCacheTests, method testRejectOtherIndex.
public void testRejectOtherIndex() throws IOException {
    BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {

        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }
    });
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
    writer.addDocument(new Document());
    DirectoryReader reader = DirectoryReader.open(writer);
    writer.close();
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test2", "_na_", 0));
    BitSetProducer producer = cache.getBitSetProducer(new MatchAllDocsQuery());
    try {
        producer.getBitSet(reader.leaves().get(0));
        fail();
    } catch (IllegalStateException expected) {
        assertEquals("Trying to load bit set for index [test2] with cache of index [test]", expected.getMessage());
    } finally {
        IOUtils.close(reader, dir);
    }
}
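The no-op listener above receives Accountable callbacks and ignores them. Accountable itself is a small Lucene interface with one required method, ramBytesUsed(), plus getChildResources() for nested accounting. A minimal sketch of an implementation, similar in spirit to what Lucene's Accountables.namedAccountable(...) produces (the class name and constructor are illustrative, not from the Elasticsearch source):

import java.util.Collection;
import java.util.Collections;
import org.apache.lucene.util.Accountable;

// Hypothetical example: an Accountable with a fixed, precomputed size.
public class FixedSizeAccountable implements Accountable {

    private final String name;
    private final long bytes;

    public FixedSizeAccountable(String name, long bytes) {
        this.name = name;
        this.bytes = bytes;
    }

    @Override
    public long ramBytesUsed() {
        // the memory footprint this resource reports to its consumers
        return bytes;
    }

    @Override
    public Collection<Accountable> getChildResources() {
        // a leaf node: no nested resources to account for
        return Collections.emptyList();
    }

    @Override
    public String toString() {
        return name;
    }
}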
Use of org.apache.lucene.util.Accountable in project elasticsearch by elastic.
From the class IndexFieldDataServiceTests, method testFieldDataCacheListener.
public void testFieldDataCacheListener() throws Exception {
    final IndexService indexService = createIndex("test");
    final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    // copy the ifdService since we can set the listener only once.
    final IndexFieldDataService ifdService = new IndexFieldDataService(indexService.getIndexSettings(), indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());
    final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
    final MappedFieldType mapper1 = new TextFieldMapper.Builder("s").fielddata(true).build(ctx).fieldType();
    final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    Document doc = new Document();
    doc.add(new StringField("s", "thisisastring", Store.NO));
    writer.addDocument(doc);
    DirectoryReader open = DirectoryReader.open(writer);
    final boolean wrap = randomBoolean();
    final IndexReader reader = wrap ? ElasticsearchDirectoryReader.wrap(open, new ShardId("test", "_na_", 1)) : open;
    final AtomicInteger onCacheCalled = new AtomicInteger();
    final AtomicInteger onRemovalCalled = new AtomicInteger();
    ifdService.setListener(new IndexFieldDataCache.Listener() {

        @Override
        public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onCacheCalled.incrementAndGet();
        }

        @Override
        public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
            if (wrap) {
                assertEquals(new ShardId("test", "_na_", 1), shardId);
            } else {
                assertNull(shardId);
            }
            onRemovalCalled.incrementAndGet();
        }
    });
    IndexFieldData<?> ifd = ifdService.getForField(mapper1);
    LeafReaderContext leafReaderContext = reader.getContext().leaves().get(0);
    AtomicFieldData load = ifd.load(leafReaderContext);
    assertEquals(1, onCacheCalled.get());
    assertEquals(0, onRemovalCalled.get());
    reader.close();
    load.close();
    writer.close();
    assertEquals(1, onCacheCalled.get());
    assertEquals(1, onRemovalCalled.get());
    ifdService.clear();
}
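These listener hooks are the natural place to keep memory accounting in sync with the cache. A minimal sketch of a listener that tracks the net number of cached bytes, assuming the same IndexFieldDataCache.Listener callback signatures shown in the test above (the cachedBytes field and the fresh ifdService are illustrative; recall that the listener can only be set once per service):

// assumes java.util.concurrent.atomic.AtomicLong is imported
final AtomicLong cachedBytes = new AtomicLong();
ifdService.setListener(new IndexFieldDataCache.Listener() {

    @Override
    public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
        // an entry was added; the Accountable reports its memory footprint
        cachedBytes.addAndGet(ramUsage.ramBytesUsed());
    }

    @Override
    public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
        // the removal callback passes the size directly rather than via an Accountable
        cachedBytes.addAndGet(-sizeInBytes);
    }
});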
Use of org.apache.lucene.util.Accountable in project elasticsearch by elastic.
From the class AbstractStringFieldDataTestCase, method testGlobalOrdinalsGetRemovedOnceIndexReaderCloses.
public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception {
    fillExtendedMvSet();
    refreshReader();
    IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues());
    IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
    assertThat(ifd.loadGlobal(topLevelReader), sameInstance(globalOrdinals));
    // 4 b/c 3 segment-level caches and 1 top-level cache;
    // in case of doc values, we don't cache atomic FD, so only the top-level entry is there
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 1L : 4L));
    IndexOrdinalsFieldData cachedInstance = null;
    for (Accountable ramUsage : indicesFieldDataCache.getCache().values()) {
        if (ramUsage instanceof IndexOrdinalsFieldData) {
            cachedInstance = (IndexOrdinalsFieldData) ramUsage;
            break;
        }
    }
    assertThat(cachedInstance, sameInstance(globalOrdinals));
    topLevelReader.close();
    // Now only the 3 segment-level entries remain: only the top-level reader has been closed,
    // but the segment readers are still used by the IndexWriter
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 0L : 3L));
    refreshReader();
    assertThat(ifd.loadGlobal(topLevelReader), not(sameInstance(globalOrdinals)));
    ifdService.clear();
    assertThat(indicesFieldDataCache.getCache().weight(), equalTo(0L));
}
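Because every cache value is an Accountable, the total memory held by the cache can be derived the same way the test iterates entries. A small sketch, relying only on Accountable.ramBytesUsed() and the cache accessor used above:

// Sketch: sum the reported memory of all field data cache entries.
long totalBytes = 0;
for (Accountable entry : indicesFieldDataCache.getCache().values()) {
    totalBytes += entry.ramBytesUsed();
}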
Use of org.apache.lucene.util.Accountable in project elasticsearch by elastic.
From the class Segment, method writeRamTree.
// the ram tree is written recursively since the depth is fairly low (5 or 6)
void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
    out.writeString(tree.toString());
    out.writeVLong(tree.ramBytesUsed());
    Collection<Accountable> children = tree.getChildResources();
    out.writeVInt(children.size());
    for (Accountable child : children) {
        writeRamTree(out, child);
    }
}
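The serialized tree has to be rebuilt on the receiving node. The read side is not part of this excerpt; a plausible sketch that mirrors writeRamTree field for field, using Lucene's Accountables.namedAccountable helpers to reconstruct each node:

Accountable readRamTree(StreamInput in) throws IOException {
    // read in the same order writeRamTree wrote: description, size, child count
    final String name = in.readString();
    final long bytes = in.readVLong();
    int numChildren = in.readVInt();
    if (numChildren == 0) {
        return Accountables.namedAccountable(name, bytes);
    }
    List<Accountable> children = new ArrayList<>(numChildren);
    while (numChildren-- > 0) {
        children.add(readRamTree(in));
    }
    return Accountables.namedAccountable(name, children, bytes);
}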
Use of org.apache.lucene.util.Accountable in project elasticsearch by elastic.
From the class IndicesSegmentResponse, method toXContent.
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject(Fields.INDICES);
    for (IndexSegments indexSegments : getIndices().values()) {
        builder.startObject(indexSegments.getIndex());
        builder.startObject(Fields.SHARDS);
        for (IndexShardSegments indexSegment : indexSegments) {
            builder.startArray(Integer.toString(indexSegment.getShardId().id()));
            for (ShardSegments shardSegments : indexSegment) {
                builder.startObject();
                builder.startObject(Fields.ROUTING);
                builder.field(Fields.STATE, shardSegments.getShardRouting().state());
                builder.field(Fields.PRIMARY, shardSegments.getShardRouting().primary());
                builder.field(Fields.NODE, shardSegments.getShardRouting().currentNodeId());
                if (shardSegments.getShardRouting().relocatingNodeId() != null) {
                    builder.field(Fields.RELOCATING_NODE, shardSegments.getShardRouting().relocatingNodeId());
                }
                builder.endObject();
                builder.field(Fields.NUM_COMMITTED_SEGMENTS, shardSegments.getNumberOfCommitted());
                builder.field(Fields.NUM_SEARCH_SEGMENTS, shardSegments.getNumberOfSearch());
                builder.startObject(Fields.SEGMENTS);
                for (Segment segment : shardSegments) {
                    builder.startObject(segment.getName());
                    builder.field(Fields.GENERATION, segment.getGeneration());
                    builder.field(Fields.NUM_DOCS, segment.getNumDocs());
                    builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs());
                    builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSizeInBytes());
                    builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getMemoryInBytes());
                    builder.field(Fields.COMMITTED, segment.isCommitted());
                    builder.field(Fields.SEARCH, segment.isSearch());
                    if (segment.getVersion() != null) {
                        builder.field(Fields.VERSION, segment.getVersion());
                    }
                    if (segment.isCompound() != null) {
                        builder.field(Fields.COMPOUND, segment.isCompound());
                    }
                    if (segment.getMergeId() != null) {
                        builder.field(Fields.MERGE_ID, segment.getMergeId());
                    }
                    if (segment.ramTree != null) {
                        builder.startArray(Fields.RAM_TREE);
                        for (Accountable child : segment.ramTree.getChildResources()) {
                            toXContent(builder, child);
                        }
                        builder.endArray();
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.endObject();
            }
            builder.endArray();
        }
        builder.endObject();
        builder.endObject();
    }
    builder.endObject();
    return builder;
}
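The ram tree entries are rendered by a recursive toXContent overload that takes an Accountable; that helper is not shown in this excerpt. A sketch of what it plausibly looks like, assuming DESCRIPTION, SIZE_IN_BYTES, SIZE, and CHILDREN constants in the same Fields class (those names are assumptions here):

static void toXContent(XContentBuilder builder, Accountable tree) throws IOException {
    builder.startObject();
    // the Accountable's toString() serves as its human-readable description
    builder.field(Fields.DESCRIPTION, tree.toString());
    builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, tree.ramBytesUsed());
    Collection<Accountable> children = tree.getChildResources();
    if (children.isEmpty() == false) {
        builder.startArray(Fields.CHILDREN);
        for (Accountable child : children) {
            // recurse, mirroring the traversal used for Fields.RAM_TREE above
            toXContent(builder, child);
        }
        builder.endArray();
    }
    builder.endObject();
}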