Example usage of org.apache.lucene.index.LeafReaderContext in the lucene-solr project (Apache): class PointInGeo3DShapeQuery, method createWeight.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    /**
     * Builds a constant-score scorer over every document in this segment whose
     * indexed point values intersect the query shape. Returns {@code null} when
     * the segment holds no point values for the field, i.e. nothing can match.
     */
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final LeafReader leafReader = context.reader();
      final PointValues pointValues = leafReader.getPointValues(field);
      if (pointValues == null) {
        // No points indexed for this field in this segment.
        return null;
      }
      // Collect matching doc ids via a BKD-tree intersection with the shape.
      final DocIdSetBuilder matching = new DocIdSetBuilder(leafReader.maxDoc(), pointValues, field);
      pointValues.intersect(new PointInShapeIntersectVisitor(matching, shape, shapeBounds));
      return new ConstantScoreScorer(this, score(), matching.build().iterator());
    }
  };
}
Example usage of org.apache.lucene.index.LeafReaderContext in the lucene-solr project (Apache): class QueryUtils, method checkFirstSkipTo.
/**
 * Check that the first skip (advance) on freshly created scorers always goes to
 * the right doc: for every hit the searcher collects, a brand-new scorer advanced
 * to any target at or before that hit must land on exactly that hit with the same
 * score. Also verifies that advancing past the last hit of each leaf finds no
 * further live matches.
 */
public static void checkFirstSkipTo(final Query q, final IndexSearcher s) throws IOException {
//System.out.println("checkFirstSkipTo: "+q);
// Tolerance for score comparisons between the collected and the advanced scorer.
final float maxDiff = 1e-3f;
// Single-element arrays so the anonymous collector below can mutate this state.
final int[] lastDoc = { -1 };
final LeafReader[] lastReader = { null };
final List<LeafReaderContext> context = s.getTopReaderContext().leaves();
s.search(q, new SimpleCollector() {
private Scorer scorer;
// Index into `context` of the leaf currently being collected.
private int leafPtr;
@Override
public void setScorer(Scorer scorer) {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
float score = scorer.score();
try {
long startMS = System.currentTimeMillis();
// For each candidate advance target between the previous hit and this one,
// a freshly built scorer must advance straight to this doc.
for (int i = lastDoc[0] + 1; i <= doc; i++) {
Weight w = s.createNormalizedWeight(q, true);
Scorer scorer = w.scorer(context.get(leafPtr));
Assert.assertTrue("query collected " + doc + " but advance(" + i + ") says no more docs!", scorer.iterator().advance(i) != DocIdSetIterator.NO_MORE_DOCS);
Assert.assertEquals("query collected " + doc + " but advance(" + i + ") got to " + scorer.docID(), doc, scorer.docID());
float advanceScore = scorer.score();
// score() called twice on the same positioned doc must be stable.
Assert.assertEquals("unstable advance(" + i + ") score!", advanceScore, scorer.score(), maxDiff);
Assert.assertEquals("query assigned doc " + doc + " a score of <" + score + "> but advance(" + i + ") has <" + advanceScore + ">!", score, advanceScore, maxDiff);
// Safety valve: if this inner loop is too slow (e.g. with the SimpleText
// codec this will kick in), jump straight to the final iteration.
if (i < doc && System.currentTimeMillis() - startMS > 5) {
i = doc - 1;
}
}
lastDoc[0] = doc;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean needsScores() {
return true;
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
// Before switching leaves: confirm that skipping beyond the last doc on the
// previous reader finds no more live matches (i.e. effectively hits NO_MORE_DOCS).
if (lastReader[0] != null) {
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
Scorer scorer = w.scorer((LeafReaderContext) indexSearcher.getTopReaderContext());
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
boolean more = false;
// NOTE(review): liveDocs are taken from the INCOMING segment's reader while the
// scorer iterates the PREVIOUS reader — the post-search check below uses
// lastReader[0].getLiveDocs() instead; confirm this asymmetry is intended.
final Bits liveDocs = context.reader().getLiveDocs();
// Deleted docs may legitimately be returned; only a live doc counts as a match.
for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) {
if (liveDocs == null || liveDocs.get(d)) {
more = true;
break;
}
}
Assert.assertFalse("query's last doc was " + lastDoc[0] + " but advance(" + (lastDoc[0] + 1) + ") got to " + scorer.docID(), more);
}
leafPtr++;
}
lastReader[0] = context.reader();
// Doc ids restart per leaf; reset the last-collected marker.
lastDoc[0] = -1;
}
});
if (lastReader[0] != null) {
// confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
Scorer scorer = w.scorer((LeafReaderContext) indexSearcher.getTopReaderContext());
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
boolean more = false;
final Bits liveDocs = lastReader[0].getLiveDocs();
for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) {
if (liveDocs == null || liveDocs.get(d)) {
more = true;
break;
}
}
Assert.assertFalse("query's last doc was " + lastDoc[0] + " but advance(" + (lastDoc[0] + 1) + ") got to " + scorer.docID(), more);
}
}
}
Example usage of org.apache.lucene.index.LeafReaderContext in the lucene-solr project (Apache): class QueryUtils, method checkSkipTo.
/** alternate scorer advance(),advance(),next(),next(),advance(),advance(), etc
 * and ensure a hitcollector receives same docs and scores
 */
public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
//System.out.println("Checking "+q);
final List<LeafReaderContext> readerContextArray = s.getTopReaderContext().leaves();
// Opcodes for the interleaving patterns below: advance() vs nextDoc().
final int skip_op = 0;
final int next_op = 1;
// Each inner array is one cyclic pattern of advance/next operations to exercise.
final int[][] orders = { { next_op }, { skip_op }, { skip_op, next_op }, { next_op, skip_op }, { skip_op, skip_op, next_op, next_op }, { next_op, next_op, skip_op, skip_op }, { skip_op, skip_op, skip_op, next_op, next_op } };
for (int k = 0; k < orders.length; k++) {
final int[] order = orders[k];
// System.out.print("Order:");for (int i = 0; i < order.length; i++)
// System.out.print(order[i]==skip_op ? " skip()":" next()");
// System.out.println();
// Mutable state shared with the anonymous collector (single-element arrays).
final int[] opidx = { 0 };
final int[] lastDoc = { -1 };
// FUTURE: ensure scorer.doc()==-1
// Tolerance for score comparisons between collected and manually-driven scorers.
final float maxDiff = 1e-5f;
final LeafReader[] lastReader = { null };
s.search(q, new SimpleCollector() {
// Scorer supplied by the search; source of the reference doc/score.
private Scorer sc;
// Independently-driven scorer that must visit the same docs with the same scores.
private Scorer scorer;
private DocIdSetIterator iterator;
private int leafPtr;
@Override
public void setScorer(Scorer scorer) {
this.sc = scorer;
}
@Override
public void collect(int doc) throws IOException {
float score = sc.score();
lastDoc[0] = doc;
try {
// Lazily build the shadow scorer for the current leaf on first hit.
if (scorer == null) {
Weight w = s.createNormalizedWeight(q, true);
LeafReaderContext context = readerContextArray.get(leafPtr);
scorer = w.scorer(context);
iterator = scorer.iterator();
}
// Pick the next operation from the cyclic pattern: advance past the
// shadow scorer's current doc, or step with nextDoc().
int op = order[(opidx[0]++) % order.length];
// System.out.println(op==skip_op ?
// "skip("+(sdoc[0]+1)+")":"next()");
boolean more = op == skip_op ? iterator.advance(scorer.docID() + 1) != DocIdSetIterator.NO_MORE_DOCS : iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
int scorerDoc = scorer.docID();
float scorerScore = scorer.score();
// score() must be stable when called twice on the same positioned doc.
float scorerScore2 = scorer.score();
float scoreDiff = Math.abs(score - scorerScore);
float scorerDiff = Math.abs(scorerScore2 - scorerScore);
boolean success = false;
try {
assertTrue(more);
assertEquals("scorerDoc=" + scorerDoc + ",doc=" + doc, scorerDoc, doc);
assertTrue("score=" + score + ", scorerScore=" + scorerScore, scoreDiff <= maxDiff);
assertTrue("scorerScorer=" + scorerScore + ", scorerScore2=" + scorerScore2, scorerDiff <= maxDiff);
success = true;
} finally {
// On failure, dump a detailed diagnostic before the assertion propagates.
if (!success) {
if (LuceneTestCase.VERBOSE) {
StringBuilder sbord = new StringBuilder();
for (int i = 0; i < order.length; i++) {
sbord.append(order[i] == skip_op ? " skip()" : " next()");
}
System.out.println("ERROR matching docs:" + "\n\t" + (doc != scorerDoc ? "--> " : "") + "doc=" + doc + ", scorerDoc=" + scorerDoc + "\n\t" + (!more ? "--> " : "") + "tscorer.more=" + more + "\n\t" + (scoreDiff > maxDiff ? "--> " : "") + "scorerScore=" + scorerScore + " scoreDiff=" + scoreDiff + " maxDiff=" + maxDiff + "\n\t" + (scorerDiff > maxDiff ? "--> " : "") + "scorerScore2=" + scorerScore2 + " scorerDiff=" + scorerDiff + "\n\thitCollector.doc=" + doc + " score=" + score + "\n\t Scorer=" + scorer + "\n\t Query=" + q + " " + q.getClass().getName() + "\n\t Searcher=" + s + "\n\t Order=" + sbord + "\n\t Op=" + (op == skip_op ? " skip()" : " next()"));
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public boolean needsScores() {
return true;
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
// Before switching leaves: confirm that advancing beyond the last doc on the
// previous reader finds no more live matches (i.e. hits NO_MORE_DOCS).
if (lastReader[0] != null) {
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
LeafReaderContext ctx = (LeafReaderContext) indexSearcher.getTopReaderContext();
Scorer scorer = w.scorer(ctx);
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
boolean more = false;
// NOTE(review): liveDocs come from the INCOMING segment's reader while the
// scorer iterates the PREVIOUS reader — the post-search check below uses
// lastReader[0].getLiveDocs(); confirm this asymmetry is intended.
final Bits liveDocs = context.reader().getLiveDocs();
for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) {
if (liveDocs == null || liveDocs.get(d)) {
more = true;
break;
}
}
Assert.assertFalse("query's last doc was " + lastDoc[0] + " but advance(" + (lastDoc[0] + 1) + ") got to " + scorer.docID(), more);
}
leafPtr++;
}
lastReader[0] = context.reader();
assert readerContextArray.get(leafPtr).reader() == context.reader();
// Force the shadow scorer to be rebuilt for the new leaf; doc ids restart per leaf.
this.scorer = null;
lastDoc[0] = -1;
}
});
if (lastReader[0] != null) {
// confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity(true));
Weight w = indexSearcher.createNormalizedWeight(q, true);
LeafReaderContext ctx = previousReader.getContext();
Scorer scorer = w.scorer(ctx);
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
boolean more = false;
final Bits liveDocs = lastReader[0].getLiveDocs();
for (int d = iterator.advance(lastDoc[0] + 1); d != DocIdSetIterator.NO_MORE_DOCS; d = iterator.nextDoc()) {
if (liveDocs == null || liveDocs.get(d)) {
more = true;
break;
}
}
Assert.assertFalse("query's last doc was " + lastDoc[0] + " but advance(" + (lastDoc[0] + 1) + ") got to " + scorer.docID(), more);
}
}
}
}
Example usage of org.apache.lucene.index.LeafReaderContext in the lucene-solr project (Apache): class QueryUtils, method checkBulkScorerSkipTo.
/** Check that the scorer and bulk scorer advance consistently. */
public static void checkBulkScorerSkipTo(Random r, Query query, IndexSearcher searcher) throws IOException {
Weight weight = searcher.createNormalizedWeight(query, true);
for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
final Scorer scorer = weight.scorer(context);
final BulkScorer bulkScorer = weight.bulkScorer(context);
if (scorer == null && bulkScorer == null) {
// Nothing matches in this segment at all.
continue;
} else if (bulkScorer == null) {
// ensure scorer is exhausted (it just didnt return null)
assert scorer.iterator().nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
continue;
}
DocIdSetIterator iterator = scorer.iterator();
int upTo = 0;
while (true) {
// Score a randomly sized window [min, max) with the bulk scorer and verify
// each doc it collects against the plain scorer, kept in lock-step.
final int min = upTo + r.nextInt(5);
final int max = min + 1 + r.nextInt(r.nextBoolean() ? 10 : 5000);
if (scorer.docID() < min) {
iterator.advance(min);
}
final int next = bulkScorer.score(new LeafCollector() {
Scorer scorer2;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer2 = scorer;
}
@Override
public void collect(int doc) throws IOException {
// Collected docs must fall inside the requested window.
assert doc >= min;
assert doc < max;
// The plain scorer must be positioned on the same doc and agree on the score.
Assert.assertEquals(scorer.docID(), doc);
Assert.assertEquals(scorer.score(), scorer2.score(), 0.01f);
// Step the plain iterator so it stays in lock-step with the bulk scorer.
iterator.nextDoc();
}
}, null, min, max);
// The bulk scorer's reported next-doc must be past the window and must not
// run ahead of the plain scorer's position.
assert max <= next;
assert next <= scorer.docID();
upTo = max;
if (scorer.docID() == DocIdSetIterator.NO_MORE_DOCS) {
// Once the plain scorer is exhausted, a final bulk score over the
// remaining range must collect nothing.
bulkScorer.score(new LeafCollector() {
@Override
public void setScorer(Scorer scorer) throws IOException {
}
@Override
public void collect(int doc) throws IOException {
// no more matches
assert false;
}
}, null, upTo, DocIdSetIterator.NO_MORE_DOCS);
break;
}
}
}
}
Example usage of org.apache.lucene.index.LeafReaderContext in the lucene-solr project (Apache): class ExportWriter, method writeDocs.
/**
 * Streams all matching documents to the client in sorted order, in batches.
 * Repeatedly scans every leaf, pushing candidate docs through a fixed-size
 * priority queue to select the next batch, then writes that batch in order.
 * Relies on the enclosing class's {@code totalHits} (total docs to export) and
 * {@code sets} (per-leaf bitsets of matching docs — assumed aligned with
 * {@code leaves}; TODO confirm against the caller that populates them).
 */
protected void writeDocs(SolrQueryRequest req, IteratorWriter.ItemWriter writer, Sort sort) throws IOException {
//Write the data.
List<LeafReaderContext> leaves = req.getSearcher().getTopReaderContext().leaves();
SortDoc sortDoc = getSortDoc(req.getSearcher(), sort.getSort());
// Number of docs written so far across all batches.
int count = 0;
// Batch size: up to this many docs are selected and written per full index pass.
int queueSize = 30000;
SortQueue queue = new SortQueue(queueSize, sortDoc);
SortDoc[] outDocs = new SortDoc[queueSize];
while (count < totalHits) {
//long begin = System.nanoTime();
queue.reset();
SortDoc top = queue.top();
// Scan every leaf, offering each remaining matching doc to the queue; only
// docs that beat the current queue minimum displace it.
for (int i = 0; i < leaves.size(); i++) {
sortDoc.setNextReader(leaves.get(i));
// cost is not useful here
DocIdSetIterator it = new BitSetIterator(sets[i], 0);
int docId = -1;
while ((docId = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
sortDoc.setValues(docId);
if (top.lessThan(sortDoc)) {
top.setValues(sortDoc);
top = queue.updateTop();
}
}
}
// Drain the queue (least-first) into outDocs, skipping unused sentinel slots
// (docId == -1) left over when fewer than queueSize docs remain.
int outDocsIndex = -1;
for (int i = 0; i < queueSize; i++) {
SortDoc s = queue.pop();
if (s.docId > -1) {
outDocs[++outDocsIndex] = s;
}
}
//long end = System.nanoTime();
count += (outDocsIndex + 1);
try {
// The queue popped least-first, so iterate backwards to emit in sort order.
for (int i = outDocsIndex; i >= 0; --i) {
SortDoc s = outDocs[i];
writer.add((MapWriter) ew -> {
writeDoc(s, leaves, ew);
s.reset();
});
}
} catch (Throwable e) {
// Walk the cause chain: a "Broken pipe" means the client disconnected, which
// is expected for streaming responses and is signalled quietly via IgnoreException.
Throwable ex = e;
while (ex != null) {
String m = ex.getMessage();
if (m != null && m.contains("Broken pipe")) {
throw new IgnoreException();
}
ex = ex.getCause();
}
// Any other failure is surfaced as an IOException (wrapped if necessary).
if (e instanceof IOException) {
throw ((IOException) e);
} else {
throw new IOException(e);
}
}
}
}
Aggregations