Use of org.apache.lucene.util.RamUsageTester in project lucene-solr by apache: class TestLRUQueryCache, method testRamBytesUsedConstantEntryOverhead.
// Test what happens when the cache contains only filters and doc id sets
// that require very little memory. In that case most of the memory is taken
// by the cache itself, not cache entries, and we want to make sure that
// memory usage is not grossly underestimated.
public void testRamBytesUsedConstantEntryOverhead() throws IOException {
  assumeFalse("LUCENE-7595: RamUsageTester does not work exact in Java 9 (estimations for maps and lists)", Constants.JRE_IS_MINIMUM_JAVA9);
  final LRUQueryCache queryCache = new LRUQueryCache(1000000, 10000000, context -> true);
  final RamUsageTester.Accumulator acc = new RamUsageTester.Accumulator() {
    @Override
    public long accumulateObject(Object o, long shallowSize, Map<Field, Object> fieldValues, Collection<Object> queue) {
      if (o instanceof DocIdSet) {
        return ((DocIdSet) o).ramBytesUsed();
      }
      if (o instanceof Query) {
        return queryCache.ramBytesUsed((Query) o);
      }
      if (o.getClass().getSimpleName().equals("SegmentCoreReaders")) {
        // do not follow references to core cache keys
        return 0;
      }
      return super.accumulateObject(o, shallowSize, fieldValues, queue);
    }
  };

  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  final int numDocs = atLeast(100);
  for (int i = 0; i < numDocs; ++i) {
    w.addDocument(doc);
  }
  final DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setQueryCache(queryCache);
  searcher.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);

  final int numQueries = atLeast(1000);
  for (int i = 0; i < numQueries; ++i) {
    final Query query = new DummyQuery();
    searcher.search(new ConstantScoreQuery(query), 1);
  }
  assertTrue(queryCache.getCacheCount() > 0);

  final long actualRamBytesUsed = RamUsageTester.sizeOf(queryCache, acc);
  final long expectedRamBytesUsed = queryCache.ramBytesUsed();
  // error < 30%
  assertEquals(actualRamBytesUsed, expectedRamBytesUsed, 30 * actualRamBytesUsed / 100);

  reader.close();
  w.close();
  dir.close();
}
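The test above relies on DummyQuery, a private helper defined elsewhere in TestLRUQueryCache. Its essential property is that each instance is distinct under equals/hashCode, so each of the roughly 1000 searches adds a fresh cache entry with a trivially small payload, which is exactly the regime where per-entry overhead dominates. Below is a minimal sketch of such a query; it is written against the Lucene 6.x-era Query API (the createWeight signature changed across versions) and is an illustration, not the verbatim lucene-solr helper.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Hypothetical stand-in for the DummyQuery helper used by the test above.
final class DummyQuery extends Query {

  private static final AtomicInteger COUNTER = new AtomicInteger();

  // unique id: no two instances are equal, so every search creates a new cache entry
  private final int id = COUNTER.incrementAndGet();

  @Override
  public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    return new ConstantScoreWeight(this) {
      @Override
      public Scorer scorer(LeafReaderContext context) throws IOException {
        // match all documents; the cached doc id set needs almost no memory
        return new ConstantScoreScorer(this, score(), DocIdSetIterator.all(context.reader().maxDoc()));
      }
    };
  }

  @Override
  public boolean equals(Object other) {
    return sameClassAs(other) && id == ((DummyQuery) other).id;
  }

  @Override
  public int hashCode() {
    return id;
  }

  @Override
  public String toString(String field) {
    return "DummyQuery(" + id + ")";
  }
}

Because the cached doc id sets are this cheap, RamUsageTester.sizeOf ends up measuring mostly the cache's own bookkeeping, which is what the 30% tolerance in the final assertion is meant to cover.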
Use of org.apache.lucene.util.RamUsageTester in project lucene-solr by apache: class TestLRUQueryCache, method testRamBytesUsedAgreesWithRamUsageTester.
// This test makes sure that, by making the same assumptions as LRUQueryCache, RamUsageTester
// computes the same memory usage.
public void testRamBytesUsedAgreesWithRamUsageTester() throws IOException {
  assumeFalse("LUCENE-7595: RamUsageTester does not work exact in Java 9 (estimations for maps and lists)", Constants.JRE_IS_MINIMUM_JAVA9);
  final LRUQueryCache queryCache = new LRUQueryCache(1 + random().nextInt(5), 1 + random().nextInt(10000), context -> random().nextBoolean());
  // an accumulator that only sums up memory usage of referenced filters and doc id sets
  final RamUsageTester.Accumulator acc = new RamUsageTester.Accumulator() {
    @Override
    public long accumulateObject(Object o, long shallowSize, Map<Field, Object> fieldValues, Collection<Object> queue) {
      if (o instanceof DocIdSet) {
        return ((DocIdSet) o).ramBytesUsed();
      }
      if (o instanceof Query) {
        return queryCache.ramBytesUsed((Query) o);
      }
      if (o instanceof IndexReader || o.getClass().getSimpleName().equals("SegmentCoreReaders")) {
        // do not take readers or core cache keys into account
        return 0;
      }
      if (o instanceof Map) {
        Map<?, ?> map = (Map<?, ?>) o;
        queue.addAll(map.keySet());
        queue.addAll(map.values());
        final long sizePerEntry = o instanceof LinkedHashMap
            ? LRUQueryCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY
            : LRUQueryCache.HASHTABLE_RAM_BYTES_PER_ENTRY;
        return sizePerEntry * map.size();
      }
      // follow links to other objects, but ignore their memory usage
      super.accumulateObject(o, shallowSize, fieldValues, queue);
      return 0;
    }

    @Override
    public long accumulateArray(Object array, long shallowSize, List<Object> values, Collection<Object> queue) {
      // follow links to other objects, but ignore their memory usage
      super.accumulateArray(array, shallowSize, values, queue);
      return 0;
    }
  };

  Directory dir = newDirectory();
  // serial merges so that segments do not get closed while we are measuring ram usage
  // with RamUsageTester
  IndexWriterConfig iwc = newIndexWriterConfig().setMergeScheduler(new SerialMergeScheduler());
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

  final List<String> colors = Arrays.asList("blue", "red", "green", "yellow");
  Document doc = new Document();
  StringField f = new StringField("color", "", Store.NO);
  doc.add(f);

  final int iters = atLeast(5);
  for (int iter = 0; iter < iters; ++iter) {
    final int numDocs = atLeast(10);
    for (int i = 0; i < numDocs; ++i) {
      f.setStringValue(RandomPicks.randomFrom(random(), colors));
      w.addDocument(doc);
    }
    try (final DirectoryReader reader = w.getReader()) {
      final IndexSearcher searcher = newSearcher(reader);
      searcher.setQueryCache(queryCache);
      searcher.setQueryCachingPolicy(MAYBE_CACHE_POLICY);
      for (int i = 0; i < 3; ++i) {
        final Query query = new TermQuery(new Term("color", RandomPicks.randomFrom(random(), colors)));
        searcher.search(new ConstantScoreQuery(query), 1);
      }
    }
    queryCache.assertConsistent();
    assertEquals(RamUsageTester.sizeOf(queryCache, acc), queryCache.ramBytesUsed());
  }

  w.close();
  dir.close();
}
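MAYBE_CACHE_POLICY is likewise a helper defined elsewhere in the test class: a QueryCachingPolicy that flips a coin for every query, so across iterations both cached and uncached paths feed into assertConsistent() and the ramBytesUsed comparison. A plausible sketch is below; the real helper draws from LuceneTestCase.random() so that decisions reproduce with the test seed, while java.util.Random is used here only to keep the snippet self-contained.

import java.io.IOException;
import java.util.Random;

import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;

// Hypothetical stand-in for the MAYBE_CACHE_POLICY constant used above.
final class MaybeCachePolicy implements QueryCachingPolicy {

  private final Random random = new Random();

  @Override
  public void onUse(Query query) {
    // no usage tracking: the decision below ignores access frequency
  }

  @Override
  public boolean shouldCache(Query query) throws IOException {
    // cache each query with probability 1/2
    return random.nextBoolean();
  }
}

Installed with searcher.setQueryCachingPolicy(new MaybeCachePolicy()), this randomizes which queries enter the cache, so the exact-equality assertion against RamUsageTester.sizeOf is exercised across many different cache states.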