Use of org.apache.lucene.index.IndexReaderContext in project elasticsearch by elastic.
In the class BitsetFilterCache, the method getAndLoadIfNotPresent:
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && indexSettings.getIndex().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index " + shardId.getIndex()
                + " with cache of index " + indexSettings.getIndex());
    }
    Cache<Query, Value> filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> {
        context.reader().addCoreClosedListener(BitsetFilterCache.this);
        return CacheBuilder.<Query, Value>builder().build();
    });
    return filterToFbs.computeIfAbsent(query, key -> {
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight weight = searcher.createNormalizedWeight(query, false);
        Scorer s = weight.scorer(context);
        final BitSet bitSet;
        if (s == null) {
            bitSet = null;
        } else {
            bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
        }
        Value value = new Value(bitSet, shardId);
        listener.onCache(shardId, value.bitset);
        return value;
    }).bitset;
}
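The notable pattern here is the two-level cache: the outer map is keyed by the segment's core cache key, so its entries are dropped when the segment core closes, and the inner cache is keyed by the query. A minimal sketch of the same layout using plain ConcurrentHashMaps; the class and method names below are hypothetical, not Elasticsearch's:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical illustration of the per-segment, per-query cache layout above.
class TwoLevelCache<K1, K2, V> {
    private final Map<K1, Map<K2, V>> loaded = new ConcurrentHashMap<>();

    V getOrCompute(K1 coreCacheKey, K2 query, Function<K2, V> loader) {
        // outer level: one inner cache per segment core
        Map<K2, V> perCore = loaded.computeIfAbsent(coreCacheKey, k -> new ConcurrentHashMap<>());
        // inner level: one cached value per query
        return perCore.computeIfAbsent(query, loader);
    }

    // mirrors the core-closed listener: dropping the inner map releases
    // every value that was built for that segment
    void onCoreClosed(K1 coreCacheKey) {
        loaded.remove(coreCacheKey);
    }
}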
Use of org.apache.lucene.index.IndexReaderContext in project elasticsearch by elastic.
In the class AggregatorTestCase, the method searchAndReduce:
/**
 * Divides the provided {@link IndexSearcher} into sub-searchers, one for each segment,
 * builds an aggregator for each sub-searcher filtered by the provided {@link Query}, and
 * returns the reduced {@link InternalAggregation}.
 */
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSearcher searcher, Query query,
        AggregationBuilder builder, MappedFieldType... fieldTypes) throws IOException {
    final IndexReaderContext ctx = searcher.getTopReaderContext();
    final ShardSearcher[] subSearchers;
    if (ctx instanceof LeafReaderContext) {
        subSearchers = new ShardSearcher[1];
        subSearchers[0] = new ShardSearcher((LeafReaderContext) ctx, ctx);
    } else {
        final CompositeReaderContext compCTX = (CompositeReaderContext) ctx;
        final int size = compCTX.leaves().size();
        subSearchers = new ShardSearcher[size];
        for (int searcherIDX = 0; searcherIDX < subSearchers.length; searcherIDX++) {
            final LeafReaderContext leave = compCTX.leaves().get(searcherIDX);
            subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
        }
    }
    List<InternalAggregation> aggs = new ArrayList<>();
    Query rewritten = searcher.rewrite(query);
    Weight weight = searcher.createWeight(rewritten, true);
    C root = createAggregator(builder, searcher, fieldTypes);
    try {
        for (ShardSearcher subSearcher : subSearchers) {
            C a = createAggregator(builder, subSearcher, fieldTypes);
            a.preCollection();
            subSearcher.search(weight, a);
            a.postCollection();
            aggs.add(a.buildAggregation(0L));
        }
        if (aggs.isEmpty()) {
            return null;
        } else {
            if (randomBoolean()) {
                // sometimes do an incremental reduce first
                List<InternalAggregation> internalAggregations = randomSubsetOf(randomIntBetween(1, aggs.size()), aggs);
                @SuppressWarnings("unchecked")
                A internalAgg = (A) aggs.get(0).doReduce(internalAggregations,
                        new InternalAggregation.ReduceContext(root.context().bigArrays(), null, false));
                aggs.removeAll(internalAggregations);
                aggs.add(internalAgg);
            }
            // now do the final reduce
            @SuppressWarnings("unchecked")
            A internalAgg = (A) aggs.get(0).doReduce(aggs,
                    new InternalAggregation.ReduceContext(root.context().bigArrays(), null, true));
            return internalAgg;
        }
    } finally {
        Releasables.close(releasables);
        releasables.clear();
    }
}
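For context, a test subclass would typically drive searchAndReduce along the lines of the sketch below. The min aggregation, the "number" field, and the field-type setup are assumptions modeled on 5.x-era aggregator tests; the exact builder and field-type signatures vary across versions:

// Hedged usage sketch inside a hypothetical AggregatorTestCase subclass.
public void testMinAggregation() throws IOException {
    try (Directory directory = newDirectory()) {
        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
        Document document = new Document();
        document.add(new SortedNumericDocValuesField("number", 7));
        indexWriter.addDocument(document);
        indexWriter.close();
        try (IndexReader indexReader = DirectoryReader.open(directory)) {
            IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
            MinAggregationBuilder aggregationBuilder = new MinAggregationBuilder("min").field("number");
            MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
            fieldType.setName("number");
            // one aggregator per segment is built, collected, and reduced internally
            InternalMin result = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), aggregationBuilder, fieldType);
            assertEquals(7, result.getValue(), 0);
        }
    }
}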
Use of org.apache.lucene.index.IndexReaderContext in project elasticsearch by elastic.
In the class NestedAggregator, the method getLeafCollector:
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // if parentDoc is 0 then this parent has no child docs (children are
            // indexed before their parent doc), so we can skip:
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }
            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }
            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
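The collect method leans on the block-join document layout: child documents are indexed immediately before their parent, so the children of parentDoc sit strictly between the previous parent and parentDoc. Below is a standalone sketch of that traversal using Lucene's FixedBitSet and BitSetIterator; the doc layout is invented for illustration:

public static void main(String[] args) throws IOException {
    // docs 0..2 are children of parent 3; docs 4..6 are children of parent 7
    FixedBitSet parentBits = new FixedBitSet(8);
    parentBits.set(3);
    parentBits.set(7);
    FixedBitSet childBits = new FixedBitSet(8);
    for (int d : new int[] { 0, 1, 2, 4, 5, 6 }) {
        childBits.set(d);
    }
    int parentDoc = 7;
    int prevParentDoc = parentBits.prevSetBit(parentDoc - 1);          // -> 3
    DocIdSetIterator childDocs = new BitSetIterator(childBits, childBits.cardinality());
    int childDocId = childDocs.advance(prevParentDoc + 1);             // -> 4
    for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
        System.out.println("collect child doc " + childDocId);         // prints 4, 5, 6
    }
}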
Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
In the class TestIndexSearcher, the method getStringVal:
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
    SchemaField sf = sqr.getSchema().getField(field);
    ValueSource vs = sf.getType().getValueSource(sf, null);
    Map context = ValueSource.newContext(sqr.getSearcher());
    vs.createWeight(context, sqr.getSearcher());
    IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
    List<LeafReaderContext> leaves = topReaderContext.leaves();
    int idx = ReaderUtil.subIndex(doc, leaves);
    LeafReaderContext leaf = leaves.get(idx);
    FunctionValues vals = vs.getValues(context, leaf);
    return vals.strVal(doc - leaf.docBase);
}
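The essential step is mapping the searcher-level doc id to a segment-local one: ReaderUtil.subIndex locates the leaf that contains the doc, and subtracting leaf.docBase rebases the id into that leaf. A small sketch with invented segment sizes:

// Suppose the index has two segments of 10 and 5 docs; their docBase values
// are then 0 and 10. The numbers below are made up for illustration, and
// 'reader' is assumed to be an open IndexReader.
List<LeafReaderContext> leaves = reader.leaves();
int globalDoc = 12;
int idx = ReaderUtil.subIndex(globalDoc, leaves);  // -> 1, the second segment
LeafReaderContext leaf = leaves.get(idx);
int localDoc = globalDoc - leaf.docBase;           // 12 - 10 = 2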
Use of org.apache.lucene.index.IndexReaderContext in project lucene-solr by apache.
In the class TestIndexSearcher, the method testReopen:
public void testReopen() throws Exception {
    assertU(adoc("id", "1", "v_t", "Hello Dude", "v_s1", "string1"));
    assertU(adoc("id", "2", "v_t", "Hello Yonik", "v_s1", "string2"));
    assertU(commit());
    SolrQueryRequest sr1 = req("q", "foo");
    IndexReader r1 = sr1.getSearcher().getRawReader();
    String sval1 = getStringVal(sr1, "v_s1", 0);
    assertEquals("string1", sval1);
    assertU(adoc("id", "3", "v_s1", "{!literal}"));
    assertU(adoc("id", "4", "v_s1", "other stuff"));
    assertU(commit());
    SolrQueryRequest sr2 = req("q", "foo");
    IndexReader r2 = sr2.getSearcher().getRawReader();
    // make sure the readers share the first segment
    // (didn't work with older versions, e.g. Lucene 2.9, going from segment -> multi)
    assertEquals(r1.leaves().get(0).reader(), r2.leaves().get(0).reader());
    assertU(adoc("id", "5", "v_f", "3.14159"));
    assertU(adoc("id", "6", "v_f", "8983", "v_s1", "string6"));
    assertU(commit());
    SolrQueryRequest sr3 = req("q", "foo");
    IndexReader r3 = sr3.getSearcher().getRawReader();
    // make sure the readers share segments
    // assertEquals(r1.getLeafReaders()[0], r3.getLeafReaders()[0]);
    assertEquals(r2.leaves().get(0).reader(), r3.leaves().get(0).reader());
    assertEquals(r2.leaves().get(1).reader(), r3.leaves().get(1).reader());
    sr1.close();
    sr2.close();
    // should currently be 1, but this could change depending on future index management
    int baseRefCount = r3.getRefCount();
    assertEquals(1, baseRefCount);
    Map<String, Metric> metrics = h.getCore().getCoreMetricManager().getRegistry().getMetrics();
    Gauge<Date> g = (Gauge<Date>) metrics.get("SEARCHER.searcher.registeredAt");
    Date sr3SearcherRegAt = g.getValue();
    // nothing has changed
    assertU(commit());
    SolrQueryRequest sr4 = req("q", "foo");
    assertSame("nothing changed, searcher should be the same", sr3.getSearcher(), sr4.getSearcher());
    assertEquals("nothing changed, searcher should not have been re-registered", sr3SearcherRegAt, g.getValue());
    IndexReader r4 = sr4.getSearcher().getRawReader();
    // force an index change so the registered searcher won't be the one we are testing
    // (and then we should be able to test the refCount going all the way to 0)
    assertU(adoc("id", "7", "v_f", "7574"));
    assertU(commit());
    // test that the reader didn't change
    assertSame(r3, r4);
    assertEquals(baseRefCount, r4.getRefCount());
    sr3.close();
    assertEquals(baseRefCount, r4.getRefCount());
    sr4.close();
    assertEquals(baseRefCount - 1, r4.getRefCount());
    SolrQueryRequest sr5 = req("q", "foo");
    IndexReaderContext rCtx5 = sr5.getSearcher().getTopReaderContext();
    assertU(delI("1"));
    assertU(commit());
    SolrQueryRequest sr6 = req("q", "foo");
    IndexReaderContext rCtx6 = sr6.getSearcher().getTopReaderContext();
    // only a single doc left in the first segment
    assertEquals(1, rCtx6.leaves().get(0).reader().numDocs());
    // the readers are now different
    assertTrue(!rCtx5.leaves().get(0).reader().equals(rCtx6.leaves().get(0).reader()));
    sr5.close();
    sr6.close();
}
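The refCount assertions follow IndexReader's reference-counting contract: each holder (such as a registered searcher) takes a reference with incRef and releases it with decRef, and the reader only really closes when the count reaches zero. A minimal sketch of that contract, assuming an open Directory named directory:

DirectoryReader reader = DirectoryReader.open(directory);
assert reader.getRefCount() == 1;  // open() leaves the count at 1
reader.incRef();                   // e.g. a searcher takes a reference
assert reader.getRefCount() == 2;
reader.decRef();                   // the searcher releases its reference
reader.decRef();                   // count reaches 0: the reader closes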