Use of org.apache.lucene.index.LeafReader in project lucene-solr by apache.
From the class QueryBitSetProducer, method getBitSet.
@Override
public BitSet getBitSet(LeafReaderContext context) throws IOException {
  final LeafReader reader = context.reader();
  final IndexReader.CacheHelper cacheHelper = reader.getCoreCacheHelper();

  // Look up a previously computed DocIdSet for this segment core, if the reader exposes a cache key.
  DocIdSet docIdSet = null;
  if (cacheHelper != null) {
    docIdSet = cache.get(cacheHelper.getKey());
  }
  if (docIdSet == null) {
    // Cache miss: execute the query against this segment and materialize the matches as a bitset.
    final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
    final IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    final Weight weight = searcher.createNormalizedWeight(query, false);
    final Scorer s = weight.scorer(context);
    if (s == null) {
      docIdSet = DocIdSet.EMPTY;
    } else {
      docIdSet = new BitDocIdSet(BitSet.of(s.iterator(), context.reader().maxDoc()));
    }
    if (cacheHelper != null) {
      cache.put(cacheHelper.getKey(), docIdSet);
    }
  }
  return docIdSet == DocIdSet.EMPTY ? null : ((BitDocIdSet) docIdSet).bits();
}
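For context, here is a minimal sketch of how a QueryBitSetProducer is commonly wired into a block-join search, assuming the Lucene join module is on the classpath; the field names "docType" and "skill" are hypothetical. The per-core caching shown in getBitSet above means the parent filter query runs at most once per segment core.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;

public class BlockJoinSketch {
  // Hypothetical schema: parent docs carry docType=parent, child docs carry a "skill" field.
  public static Query childrenToParents() {
    // The producer computes and caches one BitSet per segment core (see getBitSet above).
    BitSetProducer parents = new QueryBitSetProducer(new TermQuery(new Term("docType", "parent")));
    Query childQuery = new TermQuery(new Term("skill", "java"));
    return new ToParentBlockJoinQuery(childQuery, parents, ScoreMode.Max);
  }
}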
Use of org.apache.lucene.index.LeafReader in project lucene-solr by apache.
From the class Geo3DPointDistanceComparator, method getLeafComparator.
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
  LeafReader reader = context.reader();
  FieldInfo info = reader.getFieldInfos().fieldInfo(field);
  if (info != null) {
    Geo3DDocValuesField.checkCompatible(info);
  }
  currentDocs = DocValues.getSortedNumeric(reader, field);
  return this;
}
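A short usage sketch of how this comparator typically comes into play, assuming the field was indexed as a Geo3DPoint named "location" and that Geo3DPoint.newDistanceSort(field, latitude, longitude) is the sort factory; the field name and coordinates are illustrative. getLeafComparator above then pulls the field's SortedNumericDocValues from each LeafReader as segments are visited.

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.spatial3d.Geo3DPoint;

public class Geo3DSortSketch {
  // Return the ten documents closest to the given point, sorted by geo3d distance.
  public static TopDocs nearest(IndexSearcher searcher) throws IOException {
    Sort sort = new Sort(Geo3DPoint.newDistanceSort("location", 40.7128, -74.0060));
    return searcher.search(new MatchAllDocsQuery(), 10, sort);
  }
}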
Use of org.apache.lucene.index.LeafReader in project lucene-solr by apache.
From the class TestCompressingTermVectorsFormat, method testNoOrds.
// https://issues.apache.org/jira/browse/LUCENE-5156
public void testNoOrds() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setStoreTermVectors(true);
  doc.add(new Field("foo", "this is a test", ft));
  iw.addDocument(doc);
  LeafReader ir = getOnlyLeafReader(iw.getReader());
  Terms terms = ir.getTermVector(0, "foo");
  assertNotNull(terms);
  TermsEnum termsEnum = terms.iterator();
  assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("this")));
  try {
    termsEnum.ord();
    fail();
  } catch (UnsupportedOperationException expected) {
    // expected exception
  }
  try {
    termsEnum.seekExact(0);
    fail();
  } catch (UnsupportedOperationException expected) {
    // expected exception
  }
  ir.close();
  iw.close();
  dir.close();
}
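Since this term vectors reader rejects ord-based access, code that consumes term vectors should stick to next() and seekCeil(). A minimal sketch of that pattern, using a hypothetical field name:

import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public class TermVectorSketch {
  // Walk a document's term vector with next(); ord() and seekExact(long) may be unsupported.
  public static void printTerms(LeafReader reader, int docId, String field) throws IOException {
    Terms terms = reader.getTermVector(docId, field);
    if (terms == null) {
      return; // no term vector stored for this document/field
    }
    TermsEnum te = terms.iterator();
    for (BytesRef term = te.next(); term != null; term = te.next()) {
      System.out.println(term.utf8ToString() + " freq=" + te.totalTermFreq());
    }
  }
}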
Use of org.apache.lucene.index.LeafReader in project lucene-solr by apache.
From the class BitDocSet, method getTopFilter.
@Override
public Filter getTopFilter() {
  return new Filter() {
    final FixedBitSet bs = bits;

    @Override
    public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) {
      LeafReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
      if (context.isTopLevel) {
        return BitsFilteredDocIdSet.wrap(new BitDocIdSet(bs), acceptDocs);
      }
      final int base = context.docBase;
      // one past the max doc in this segment.
      final int max = base + reader.maxDoc();
      return BitsFilteredDocIdSet.wrap(new DocIdSet() {

        @Override
        public DocIdSetIterator iterator() {
          return new DocIdSetIterator() {
            int pos = base - 1;
            int adjustedDoc = -1;

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              int next = pos + 1;
              if (next >= max) {
                return adjustedDoc = NO_MORE_DOCS;
              } else {
                pos = bs.nextSetBit(next);
                return adjustedDoc = pos < max ? pos - base : NO_MORE_DOCS;
              }
            }

            @Override
            public int advance(int target) {
              if (target == NO_MORE_DOCS)
                return adjustedDoc = NO_MORE_DOCS;
              int adjusted = target + base;
              if (adjusted >= max) {
                return adjustedDoc = NO_MORE_DOCS;
              } else {
                pos = bs.nextSetBit(adjusted);
                return adjustedDoc = pos < max ? pos - base : NO_MORE_DOCS;
              }
            }

            @Override
            public long cost() {
              // we don't want to actually compute cardinality, but
              // if it's already been computed, we use it (pro-rated for the segment)
              int maxDoc = max - base;
              if (size != -1) {
                return (long) (size * ((FixedBitSet.bits2words(maxDoc) << 6) / (float) bs.length()));
              } else {
                return maxDoc;
              }
            }
          };
        }

        @Override
        public long ramBytesUsed() {
          return bs.ramBytesUsed();
        }

        @Override
        public Bits bits() {
          return new Bits() {
            @Override
            public boolean get(int index) {
              return bs.get(index + base);
            }

            @Override
            public int length() {
              return max - base;
            }
          };
        }
      }, acceptDocs2);
    }

    @Override
    public String toString(String field) {
      return "BitSetDocTopFilter";
    }

    @Override
    public boolean equals(Object other) {
      return sameClassAs(other) && Objects.equals(bs, getClass().cast(other).bs);
    }

    @Override
    public int hashCode() {
      return classHash() * 31 + bs.hashCode();
    }
  };
}
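The key detail above is the docBase arithmetic: the top-level FixedBitSet holds global doc IDs, and each per-segment iterator returns pos - base. Below is a small sketch of the same conversion done by hand with standard Lucene APIs; the class and method names are illustrative.

import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

public class DocBaseSketch {
  // Map a top-level (global) doc ID to its segment-local doc ID.
  public static int toLocal(IndexReader topReader, int globalDocId) {
    List<LeafReaderContext> leaves = topReader.leaves();
    int leafOrd = ReaderUtil.subIndex(globalDocId, leaves);
    LeafReaderContext context = leaves.get(leafOrd);
    return globalDocId - context.docBase; // same adjustment as pos - base above
  }
}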
Use of org.apache.lucene.index.LeafReader in project lucene-solr by apache.
From the class DocSetBase, method getTopFilter.
@Override
public Filter getTopFilter() {
  return new Filter() {
    final FixedBitSet bs = getBits();

    @Override
    public DocIdSet getDocIdSet(final LeafReaderContext context, Bits acceptDocs) {
      LeafReader reader = context.reader();
      // all Solr DocSets that are used as filters only include live docs
      final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
      if (context.isTopLevel) {
        return BitsFilteredDocIdSet.wrap(new BitDocIdSet(bs), acceptDocs);
      }
      final int base = context.docBase;
      final int maxDoc = reader.maxDoc();
      // one past the max doc in this segment.
      final int max = base + maxDoc;
      return BitsFilteredDocIdSet.wrap(new DocIdSet() {

        @Override
        public DocIdSetIterator iterator() {
          return new DocIdSetIterator() {
            int pos = base - 1;
            int adjustedDoc = -1;

            @Override
            public int docID() {
              return adjustedDoc;
            }

            @Override
            public int nextDoc() {
              // TODO: this is buggy if getBits() returns a bitset that does not have a capacity of maxDoc
              pos = bs.nextSetBit(pos + 1);
              return adjustedDoc = pos < max ? pos - base : NO_MORE_DOCS;
            }

            @Override
            public int advance(int target) {
              if (target == NO_MORE_DOCS)
                return adjustedDoc = NO_MORE_DOCS;
              pos = bs.nextSetBit(target + base);
              return adjustedDoc = pos < max ? pos - base : NO_MORE_DOCS;
            }

            @Override
            public long cost() {
              return bs.length();
            }
          };
        }

        @Override
        public long ramBytesUsed() {
          return bs.ramBytesUsed();
        }

        @Override
        public Bits bits() {
          // sparse filters should not use random access
          return null;
        }
      }, acceptDocs2);
    }

    @Override
    public String toString(String field) {
      return "DocSetTopFilter";
    }

    @Override
    public boolean equals(Object other) {
      return sameClassAs(other) && Objects.equals(bs, getClass().cast(other).bs);
    }

    @Override
    public int hashCode() {
      return classHash() ^ bs.hashCode();
    }
  };
}
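Both getTopFilter variants hand back a per-segment DocIdSet, and the usual way to consume one is a nextDoc() loop over segment-local IDs. A minimal sketch of that consumption pattern, with an illustrative class name:

import java.io.IOException;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;

public class DocIdSetSketch {
  // Iterate every segment-local doc ID in a DocIdSet until exhaustion.
  public static void collect(DocIdSet docIdSet) throws IOException {
    DocIdSetIterator it = docIdSet.iterator();
    if (it == null) {
      return; // an empty DocIdSet may return a null iterator
    }
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      System.out.println("matched segment-local doc " + doc);
    }
  }
}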