Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From the class BaseGeoPointTestCase, method verifyRandomRectangles:
protected void verifyRandomRectangles(double[] lats, double[] lons) throws Exception {
  IndexWriterConfig iwc = newIndexWriterConfig();
  // Else seeds may not reproduce:
  iwc.setMergeScheduler(new SerialMergeScheduler());
  // Else we can get O(N^2) merging:
  int mbd = iwc.getMaxBufferedDocs();
  if (mbd != -1 && mbd < lats.length / 100) {
    iwc.setMaxBufferedDocs(lats.length / 100);
  }
  Directory dir;
  if (lats.length > 100000) {
    dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
  } else {
    dir = newDirectory();
  }
  Set<Integer> deleted = new HashSet<>();
  // RandomIndexWriter is too slow here:
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int id = 0; id < lats.length; id++) {
    Document doc = new Document();
    doc.add(newStringField("id", "" + id, Field.Store.NO));
    doc.add(new NumericDocValuesField("id", id));
    if (Double.isNaN(lats[id]) == false) {
      addPointToDoc(FIELD_NAME, doc, lats[id], lons[id]);
    }
    w.addDocument(doc);
    if (id > 0 && random().nextInt(100) == 42) {
      int idToDelete = random().nextInt(id);
      w.deleteDocuments(new Term("id", "" + idToDelete));
      deleted.add(idToDelete);
      if (VERBOSE) {
        System.out.println("  delete id=" + idToDelete);
      }
    }
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }
  final IndexReader r = DirectoryReader.open(w);
  w.close();
  IndexSearcher s = newSearcher(r);
  int iters = atLeast(25);
  Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
  int maxDoc = s.getIndexReader().maxDoc();
  for (int iter = 0; iter < iters; iter++) {
    if (VERBOSE) {
      System.out.println("\nTEST: iter=" + iter + " s=" + s);
    }
    Rectangle rect = nextBox();
    Query query = newRectQuery(FIELD_NAME, rect.minLat, rect.maxLat, rect.minLon, rect.maxLon);
    if (VERBOSE) {
      System.out.println("  query=" + query);
    }
    final FixedBitSet hits = new FixedBitSet(maxDoc);
    s.search(query, new SimpleCollector() {
      private int docBase;

      @Override
      public boolean needsScores() {
        return false;
      }

      @Override
      protected void doSetNextReader(LeafReaderContext context) throws IOException {
        docBase = context.docBase;
      }

      @Override
      public void collect(int doc) {
        hits.set(docBase + doc);
      }
    });
    boolean fail = false;
    NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
    for (int docID = 0; docID < maxDoc; docID++) {
      assertEquals(docID, docIDToID.nextDoc());
      int id = (int) docIDToID.longValue();
      boolean expected;
      if (liveDocs != null && liveDocs.get(docID) == false) {
        // document is deleted
        expected = false;
      } else if (Double.isNaN(lats[id])) {
        expected = false;
      } else {
        expected = rectContainsPoint(rect, lats[id], lons[id]);
      }
      if (hits.get(docID) != expected) {
        StringBuilder b = new StringBuilder();
        b.append("docID=(" + docID + ")\n");
        if (expected) {
          b.append("FAIL: id=" + id + " should match but did not\n");
        } else {
          b.append("FAIL: id=" + id + " should not match but did\n");
        }
        b.append("  box=" + rect + "\n");
        b.append("  query=" + query + " docID=" + docID + "\n");
        b.append("  lat=" + lats[id] + " lon=" + lons[id] + "\n");
        b.append("  deleted?=" + (liveDocs != null && liveDocs.get(docID) == false));
        if (true) {
          fail("wrong hit (first of possibly more):\n\n" + b);
        } else {
          System.out.println(b.toString());
          fail = true;
        }
      }
    }
    if (fail) {
      fail("some hits were wrong");
    }
  }
  IOUtils.close(r, dir);
}
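The doSetNextReader/docBase remapping above is the core LeafReaderContext pattern in these tests: per-segment doc IDs are converted to index-wide ones before being recorded. Here is the same idea as a standalone helper, a minimal sketch assuming the same Lucene version the test targets (where SimpleCollector still declares needsScores()); the method name is illustrative, not from the test.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.util.FixedBitSet;

// Collect every match of `query` into a bitset addressed by top-level (index-wide) doc ID.
static FixedBitSet collectGlobalHits(IndexSearcher searcher, Query query) throws IOException {
  final FixedBitSet hits = new FixedBitSet(searcher.getIndexReader().maxDoc());
  searcher.search(query, new SimpleCollector() {
    private int docBase; // doc ID offset of the segment currently being collected

    @Override
    public boolean needsScores() {
      return false; // membership only; scores are not needed
    }

    @Override
    protected void doSetNextReader(LeafReaderContext context) throws IOException {
      docBase = context.docBase; // where this segment starts in the composite reader
    }

    @Override
    public void collect(int doc) {
      hits.set(docBase + doc); // segment-local doc ID -> index-wide doc ID
    }
  });
  return hits;
}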
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From the class BaseGeoPointTestCase, method verifyRandomDistances:
protected void verifyRandomDistances(double[] lats, double[] lons) throws Exception {
  IndexWriterConfig iwc = newIndexWriterConfig();
  // Else seeds may not reproduce:
  iwc.setMergeScheduler(new SerialMergeScheduler());
  // Else we can get O(N^2) merging:
  int mbd = iwc.getMaxBufferedDocs();
  if (mbd != -1 && mbd < lats.length / 100) {
    iwc.setMaxBufferedDocs(lats.length / 100);
  }
  Directory dir;
  if (lats.length > 100000) {
    dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
  } else {
    dir = newDirectory();
  }
  Set<Integer> deleted = new HashSet<>();
  // RandomIndexWriter is too slow here:
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int id = 0; id < lats.length; id++) {
    Document doc = new Document();
    doc.add(newStringField("id", "" + id, Field.Store.NO));
    doc.add(new NumericDocValuesField("id", id));
    if (Double.isNaN(lats[id]) == false) {
      addPointToDoc(FIELD_NAME, doc, lats[id], lons[id]);
    }
    w.addDocument(doc);
    if (id > 0 && random().nextInt(100) == 42) {
      int idToDelete = random().nextInt(id);
      w.deleteDocuments(new Term("id", "" + idToDelete));
      deleted.add(idToDelete);
      if (VERBOSE) {
        System.out.println("  delete id=" + idToDelete);
      }
    }
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }
  final IndexReader r = DirectoryReader.open(w);
  w.close();
  IndexSearcher s = newSearcher(r);
  int iters = atLeast(25);
  Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
  int maxDoc = s.getIndexReader().maxDoc();
  for (int iter = 0; iter < iters; iter++) {
    if (VERBOSE) {
      System.out.println("\nTEST: iter=" + iter + " s=" + s);
    }
    // Distance
    final double centerLat = nextLatitude();
    final double centerLon = nextLongitude();
    // So the query can cover at most 50% of the earth's surface:
    final double radiusMeters = random().nextDouble() * GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI / 2.0 + 1.0;
    if (VERBOSE) {
      final DecimalFormat df = new DecimalFormat("#,###.00", DecimalFormatSymbols.getInstance(Locale.ENGLISH));
      System.out.println("  radiusMeters = " + df.format(radiusMeters));
    }
    Query query = newDistanceQuery(FIELD_NAME, centerLat, centerLon, radiusMeters);
    if (VERBOSE) {
      System.out.println("  query=" + query);
    }
    final FixedBitSet hits = new FixedBitSet(maxDoc);
    s.search(query, new SimpleCollector() {
      private int docBase;

      @Override
      public boolean needsScores() {
        return false;
      }

      @Override
      protected void doSetNextReader(LeafReaderContext context) throws IOException {
        docBase = context.docBase;
      }

      @Override
      public void collect(int doc) {
        hits.set(docBase + doc);
      }
    });
    boolean fail = false;
    NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
    for (int docID = 0; docID < maxDoc; docID++) {
      assertEquals(docID, docIDToID.nextDoc());
      int id = (int) docIDToID.longValue();
      boolean expected;
      if (liveDocs != null && liveDocs.get(docID) == false) {
        // document is deleted
        expected = false;
      } else if (Double.isNaN(lats[id])) {
        expected = false;
      } else {
        expected = SloppyMath.haversinMeters(centerLat, centerLon, lats[id], lons[id]) <= radiusMeters;
      }
      if (hits.get(docID) != expected) {
        StringBuilder b = new StringBuilder();
        if (expected) {
          b.append("FAIL: id=" + id + " should match but did not\n");
        } else {
          b.append("FAIL: id=" + id + " should not match but did\n");
        }
        b.append("  query=" + query + " docID=" + docID + "\n");
        b.append("  lat=" + lats[id] + " lon=" + lons[id] + "\n");
        b.append("  deleted?=" + (liveDocs != null && liveDocs.get(docID) == false));
        if (Double.isNaN(lats[id]) == false) {
          double distanceMeters = SloppyMath.haversinMeters(centerLat, centerLon, lats[id], lons[id]);
          b.append("  centerLat=" + centerLat + " centerLon=" + centerLon + " distanceMeters=" + distanceMeters + " vs radiusMeters=" + radiusMeters);
        }
        if (true) {
          fail("wrong hit (first of possibly more):\n\n" + b);
        } else {
          System.out.println(b.toString());
          fail = true;
        }
      }
    }
    if (fail) {
      fail("some hits were wrong");
    }
  }
  IOUtils.close(r, dir);
}
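The ground truth this test checks against is a plain haversine comparison: a document is an expected hit when its point lies within radiusMeters of the query center. A minimal sketch of that check as a helper (the helper name is hypothetical; the NaN convention mirrors the test's "no point indexed for this id" marker):

import org.apache.lucene.util.SloppyMath;

// Brute-force expectation for the distance query above.
static boolean expectedDistanceHit(double centerLat, double centerLon, double radiusMeters,
                                   double lat, double lon) {
  if (Double.isNaN(lat)) {
    return false; // NaN latitude means no point was indexed for this id
  }
  return SloppyMath.haversinMeters(centerLat, centerLon, lat, lon) <= radiusMeters;
}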
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From the class BaseGeoPointTestCase, method verifyRandomPolygons:
protected void verifyRandomPolygons(double[] lats, double[] lons) throws Exception {
  IndexWriterConfig iwc = newIndexWriterConfig();
  // Else seeds may not reproduce:
  iwc.setMergeScheduler(new SerialMergeScheduler());
  // Else we can get O(N^2) merging:
  int mbd = iwc.getMaxBufferedDocs();
  if (mbd != -1 && mbd < lats.length / 100) {
    iwc.setMaxBufferedDocs(lats.length / 100);
  }
  Directory dir;
  if (lats.length > 100000) {
    dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
  } else {
    dir = newDirectory();
  }
  Set<Integer> deleted = new HashSet<>();
  // RandomIndexWriter is too slow here:
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int id = 0; id < lats.length; id++) {
    Document doc = new Document();
    doc.add(newStringField("id", "" + id, Field.Store.NO));
    doc.add(new NumericDocValuesField("id", id));
    if (Double.isNaN(lats[id]) == false) {
      addPointToDoc(FIELD_NAME, doc, lats[id], lons[id]);
    }
    w.addDocument(doc);
    if (id > 0 && random().nextInt(100) == 42) {
      int idToDelete = random().nextInt(id);
      w.deleteDocuments(new Term("id", "" + idToDelete));
      deleted.add(idToDelete);
      if (VERBOSE) {
        System.out.println("  delete id=" + idToDelete);
      }
    }
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }
  final IndexReader r = DirectoryReader.open(w);
  w.close();
  // We can't wrap with "exotic" readers because points needs to work:
  IndexSearcher s = newSearcher(r);
  final int iters = atLeast(75);
  Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
  int maxDoc = s.getIndexReader().maxDoc();
  for (int iter = 0; iter < iters; iter++) {
    if (VERBOSE) {
      System.out.println("\nTEST: iter=" + iter + " s=" + s);
    }
    // Polygon
    Polygon polygon = nextPolygon();
    Query query = newPolygonQuery(FIELD_NAME, polygon);
    if (VERBOSE) {
      System.out.println("  query=" + query);
    }
    final FixedBitSet hits = new FixedBitSet(maxDoc);
    s.search(query, new SimpleCollector() {
      private int docBase;

      @Override
      public boolean needsScores() {
        return false;
      }

      @Override
      protected void doSetNextReader(LeafReaderContext context) throws IOException {
        docBase = context.docBase;
      }

      @Override
      public void collect(int doc) {
        hits.set(docBase + doc);
      }
    });
    boolean fail = false;
    NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
    for (int docID = 0; docID < maxDoc; docID++) {
      assertEquals(docID, docIDToID.nextDoc());
      int id = (int) docIDToID.longValue();
      boolean expected;
      if (liveDocs != null && liveDocs.get(docID) == false) {
        // document is deleted
        expected = false;
      } else if (Double.isNaN(lats[id])) {
        expected = false;
      } else {
        expected = GeoTestUtil.containsSlowly(polygon, lats[id], lons[id]);
      }
      if (hits.get(docID) != expected) {
        StringBuilder b = new StringBuilder();
        if (expected) {
          b.append("FAIL: id=" + id + " should match but did not\n");
        } else {
          b.append("FAIL: id=" + id + " should not match but did\n");
        }
        b.append("  query=" + query + " docID=" + docID + "\n");
        b.append("  lat=" + lats[id] + " lon=" + lons[id] + "\n");
        b.append("  deleted?=" + (liveDocs != null && liveDocs.get(docID) == false));
        b.append("  polygon=" + polygon);
        if (true) {
          fail("wrong hit (first of possibly more):\n\n" + b);
        } else {
          System.out.println(b.toString());
          fail = true;
        }
      }
    }
    if (fail) {
      fail("some hits were wrong");
    }
  }
  IOUtils.close(r, dir);
}
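newRectQuery and newPolygonQuery are abstract hooks of BaseGeoPointTestCase, filled in by the concrete point implementation under test. As an assumption (not shown in the snippets above), a subclass bound to LatLonPoint could delegate to queries like the following; the field name and coordinates are purely illustrative:

import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.search.Query;

// Box query: minLat, maxLat, minLon, maxLon.
static Query sampleBoxQuery() {
  return LatLonPoint.newBoxQuery("point", -10, 10, -10, 10);
}

// Polygon query: the ring must be closed, i.e. first and last vertex identical.
static Query samplePolygonQuery() {
  double[] polyLats = {-10, -10, 10, 10, -10};
  double[] polyLons = {-10, 10, 10, -10, -10};
  return LatLonPoint.newPolygonQuery("point", new Polygon(polyLats, polyLons));
}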
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From the class LTRRescorer, method extractFeaturesInfo:
public static LTRScoringQuery.FeatureInfo[] extractFeaturesInfo(LTRScoringQuery.ModelWeight modelWeight,
    int docid, Float originalDocScore, List<LeafReaderContext> leafContexts) throws IOException {
  final int n = ReaderUtil.subIndex(docid, leafContexts);
  final LeafReaderContext atomicContext = leafContexts.get(n);
  final int deBasedDoc = docid - atomicContext.docBase;
  final LTRScoringQuery.ModelWeight.ModelScorer r = modelWeight.scorer(atomicContext);
  if ((r == null) || (r.iterator().advance(deBasedDoc) != deBasedDoc)) {
    return new LTRScoringQuery.FeatureInfo[0];
  } else {
    if (originalDocScore != null) {
      // If results have not been reranked, the score passed in is the original query's
      // score, which some features can use instead of recalculating it
      r.getDocInfo().setOriginalDocScore(originalDocScore);
    }
    r.score();
    return modelWeight.getFeaturesInfo();
  }
}
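The first three lines of extractFeaturesInfo are the standard way to turn a top-level doc ID into a segment-local one: ReaderUtil.subIndex finds the leaf that contains the document, and subtracting that leaf's docBase rebases the ID into the segment. In isolation, and with an illustrative method name, the mapping looks like this:

import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

// Convert an index-wide doc ID into the doc ID local to its segment.
static int toSegmentLocalDoc(IndexReader reader, int topLevelDocId) {
  List<LeafReaderContext> leaves = reader.leaves();
  int leaf = ReaderUtil.subIndex(topLevelDocId, leaves); // which segment holds the doc
  return topLevelDocId - leaves.get(leaf).docBase;       // rebase into that segment
}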
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
From the class LTRRescorer, method explain:
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
  final int n = ReaderUtil.subIndex(docID, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = docID - context.docBase;
  final Weight modelWeight = searcher.createNormalizedWeight(scoringQuery, true);
  return modelWeight.explain(context, deBasedDoc);
}
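For context, a Rescorer's explain is normally driven with the first-pass explanation in hand. A minimal sketch of such a call site, assuming a generic Rescorer instance and a first-pass query (names are illustrative, not from the project):

import java.io.IOException;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Rescorer;

// Explain the first-pass score, then ask the rescorer how it arrives at the reranked score.
static Explanation explainReranked(IndexSearcher searcher, Query firstPassQuery,
                                   Rescorer rescorer, int docID) throws IOException {
  Explanation firstPass = searcher.explain(firstPassQuery, docID);
  return rescorer.explain(searcher, firstPass, docID);
}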