Example usage of org.apache.lucene.document.StringField in the Apache lucene-solr project:
class TestLRUQueryCache, method testRamBytesUsedAgreesWithRamUsageTester.
// This test makes sure that by making the same assumptions as LRUQueryCache, RAMUsageTester
// computes the same memory usage.
// This test makes sure that by making the same assumptions as LRUQueryCache, RAMUsageTester
// computes the same memory usage.
public void testRamBytesUsedAgreesWithRamUsageTester() throws IOException {
// RamUsageTester's map/list estimations are inexact on Java 9+, so skip there (LUCENE-7595).
assumeFalse("LUCENE-7595: RamUsageTester does not work exact in Java 9 (estimations for maps and lists)", Constants.JRE_IS_MINIMUM_JAVA9);
// Small random size limits so the cache fills up and evicts across iterations.
final LRUQueryCache queryCache = new LRUQueryCache(1 + random().nextInt(5), 1 + random().nextInt(10000), context -> random().nextBoolean());
// an accumulator that only sums up memory usage of referenced filters and doc id sets
final RamUsageTester.Accumulator acc = new RamUsageTester.Accumulator() {
@Override
public long accumulateObject(Object o, long shallowSize, Map<Field, Object> fieldValues, Collection<Object> queue) {
// Cached doc id sets report their own size, matching what the cache accounts for them.
if (o instanceof DocIdSet) {
return ((DocIdSet) o).ramBytesUsed();
}
// Queries are measured with the cache's own per-query accounting.
if (o instanceof Query) {
return queryCache.ramBytesUsed((Query) o);
}
if (o instanceof IndexReader || o.getClass().getSimpleName().equals("SegmentCoreReaders")) {
// do not take readers or core cache keys into account
return 0;
}
// Maps are charged a fixed per-entry overhead, mirroring the
// (LINKED_)HASHTABLE_RAM_BYTES_PER_ENTRY constants used by LRUQueryCache itself;
// keys and values are still enqueued so referenced queries/doc id sets get counted.
if (o instanceof Map) {
Map<?, ?> map = (Map<?, ?>) o;
queue.addAll(map.keySet());
queue.addAll(map.values());
final long sizePerEntry = o instanceof LinkedHashMap ? LRUQueryCache.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY : LRUQueryCache.HASHTABLE_RAM_BYTES_PER_ENTRY;
return sizePerEntry * map.size();
}
// follow links to other objects, but ignore their memory usage
super.accumulateObject(o, shallowSize, fieldValues, queue);
return 0;
}
@Override
public long accumulateArray(Object array, long shallowSize, List<Object> values, Collection<Object> queue) {
// follow links to other objects, but ignore their memory usage
super.accumulateArray(array, shallowSize, values, queue);
return 0;
}
};
Directory dir = newDirectory();
// serial merges so that segments do not get closed while we are measuring ram usage
// with RamUsageTester
IndexWriterConfig iwc = newIndexWriterConfig().setMergeScheduler(new SerialMergeScheduler());
final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
final List<String> colors = Arrays.asList("blue", "red", "green", "yellow");
Document doc = new Document();
// Single reusable field instance; its value is reset per document below.
StringField f = new StringField("color", "", Store.NO);
doc.add(f);
final int iters = atLeast(5);
for (int iter = 0; iter < iters; ++iter) {
final int numDocs = atLeast(10);
for (int i = 0; i < numDocs; ++i) {
f.setStringValue(RandomPicks.randomFrom(random(), colors));
w.addDocument(doc);
}
try (final DirectoryReader reader = w.getReader()) {
final IndexSearcher searcher = newSearcher(reader);
searcher.setQueryCache(queryCache);
searcher.setQueryCachingPolicy(MAYBE_CACHE_POLICY);
// Run a few random term queries so some entries get cached (policy caches randomly).
for (int i = 0; i < 3; ++i) {
final Query query = new TermQuery(new Term("color", RandomPicks.randomFrom(random(), colors)));
searcher.search(new ConstantScoreQuery(query), 1);
}
}
queryCache.assertConsistent();
// The independently-computed size must agree with the cache's own bookkeeping.
assertEquals(RamUsageTester.sizeOf(queryCache, acc), queryCache.ramBytesUsed());
}
w.close();
dir.close();
}
Example usage of org.apache.lucene.document.StringField in the Apache lucene-solr project:
class TestLRUQueryCache, method testUseRewrittenQueryAsCacheKey.
// Verifies that the cache and the caching policy are consulted with the
// rewritten leaf query (the bare TermQuery), not with the BooleanQuery/BoostQuery
// wrapper the caller actually searched with.
public void testUseRewrittenQueryAsCacheKey() throws IOException {
  final Query expectedCacheKey = new TermQuery(new Term("foo", "bar"));
  final BooleanQuery.Builder builder = new BooleanQuery.Builder();
  builder.add(new BoostQuery(expectedCacheKey, 42f), Occur.MUST);

  // Limits are large enough that nothing gets evicted during the test.
  final LRUQueryCache queryCache =
      new LRUQueryCache(1000000, 10000000, context -> random().nextBoolean());

  Directory dir = newDirectory();
  final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document document = new Document();
  document.add(new StringField("foo", "bar", Store.YES));
  writer.addDocument(document);
  writer.commit();
  final IndexReader reader = writer.getReader();
  final IndexSearcher searcher = newSearcher(reader);
  writer.close();

  // Policy that asserts every query it sees is the rewritten cache key.
  final QueryCachingPolicy policy = new QueryCachingPolicy() {
    @Override
    public boolean shouldCache(Query query) throws IOException {
      assertEquals(expectedCacheKey, query);
      return true;
    }

    @Override
    public void onUse(Query query) {
      assertEquals(expectedCacheKey, query);
    }
  };

  searcher.setQueryCache(queryCache);
  searcher.setQueryCachingPolicy(policy);
  searcher.search(builder.build(), new TotalHitCountCollector());
  reader.close();
  dir.close();
}
Example usage of org.apache.lucene.document.StringField in the Apache lucene-solr project:
class TestConjunctions, method doc.
/**
 * Builds a two-field test document: {@code F1} holds {@code v1} as a single
 * indexed token (StringField) and {@code F2} holds {@code v2} as analyzed
 * text (TextField); both values are stored.
 */
static Document doc(String v1, String v2) {
  final Document result = new Document();
  result.add(new StringField(F1, v1, Store.YES));
  result.add(new TextField(F2, v2, Store.YES));
  return result;
}
Example usage of org.apache.lucene.document.StringField in the Apache lucene-solr project:
class TestEarlyTerminatingSortingCollector, method randomDocument.
/**
 * Creates a document with two random numeric doc values ({@code ndv1},
 * {@code ndv2}, each in [0,10)) and a stored string field {@code s} drawn
 * randomly from the test's {@code terms} list.
 */
private Document randomDocument() {
  final Document document = new Document();
  final int dv1 = random().nextInt(10);
  final int dv2 = random().nextInt(10);
  document.add(new NumericDocValuesField("ndv1", dv1));
  document.add(new NumericDocValuesField("ndv2", dv2));
  final String term = RandomPicks.randomFrom(random(), terms);
  document.add(new StringField("s", term, Store.YES));
  return document;
}
Example usage of org.apache.lucene.document.StringField in the Apache lucene-solr project:
class TestIndexOrDocValuesQuery, method testUseIndexForSelectiveQueries.
// Verifies that IndexOrDocValuesQuery picks doc values when the other
// required clause is highly selective, and points when it is not.
public void testUseIndexForSelectiveQueries() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(dir, newIndexWriterConfig().setCodec(TestUtil.getDefaultCodec()));
  // 2000 docs: exactly one has f1=foo (i == 100) and exactly one has f2=42
  // (i == 42); every other doc has f1=bar and f2=2.
  for (int i = 0; i < 2000; ++i) {
    final String f1Value = (i == 100) ? "foo" : "bar";
    final long f2Value = (i == 42) ? 42L : 2L;
    Document document = new Document();
    document.add(new StringField("f1", f1Value, Store.NO));
    document.add(new LongPoint("f2", f2Value));
    document.add(new NumericDocValuesField("f2", f2Value));
    writer.addDocument(document);
  }
  writer.forceMerge(1);
  IndexReader reader = DirectoryReader.open(writer);
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCache(null);
  // f1:foo matches a single doc, so it is more selective than f2=2;
  // IndexOrDocValuesQuery should choose the doc-values execution.
  final Query q1 = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
      .add(new IndexOrDocValuesQuery(
              LongPoint.newExactQuery("f2", 2),
              NumericDocValuesField.newRangeQuery("f2", 2L, 2L)),
          Occur.MUST)
      .build();
  final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
  final Scorer s1 = w1.scorer(searcher.getIndexReader().leaves().get(0));
  // A two-phase iterator means doc values were used.
  assertNotNull(s1.twoPhaseIterator());
  // f1:bar matches nearly everything while f2=42 matches a single doc;
  // the points-based side should be preferred this time.
  final Query q2 = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("f1", "bar")), Occur.MUST)
      .add(new IndexOrDocValuesQuery(
              LongPoint.newExactQuery("f2", 42),
              NumericDocValuesField.newRangeQuery("f2", 42L, 42L)),
          Occur.MUST)
      .build();
  final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());
  final Scorer s2 = w2.scorer(searcher.getIndexReader().leaves().get(0));
  // No two-phase iterator means points were used.
  assertNull(s2.twoPhaseIterator());
  reader.close();
  writer.close();
  dir.close();
}
Aggregations