Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
In class TestRangeFacetCounts, the method testCustomDoubleValuesSource:
public void testCustomDoubleValuesSource() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  writer.addDocument(doc);
  writer.addDocument(doc);
  writer.addDocument(doc);
  // Test wants 3 docs in one segment:
  writer.forceMerge(1);
  final DoubleValuesSource vs = new DoubleValuesSource() {

    @Override
    public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
      return new DoubleValues() {

        int doc = -1;

        @Override
        public double doubleValue() throws IOException {
          return doc + 1;
        }

        @Override
        public boolean advanceExact(int doc) throws IOException {
          this.doc = doc;
          return true;
        }
      };
    }

    @Override
    public boolean needsScores() {
      return false;
    }

    @Override
    public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
      return Explanation.match(docId + 1, "");
    }
  };
  FacetsConfig config = new FacetsConfig();
  FacetsCollector fc = new FacetsCollector();
  IndexReader r = writer.getReader();
  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);
  final DoubleRange[] ranges = new DoubleRange[] {
      new DoubleRange("< 1", 0.0, true, 1.0, false),
      new DoubleRange("< 2", 0.0, true, 2.0, false),
      new DoubleRange("< 5", 0.0, true, 5.0, false),
      new DoubleRange("< 10", 0.0, true, 10.0, false),
      new DoubleRange("< 20", 0.0, true, 20.0, false),
      new DoubleRange("< 50", 0.0, true, 50.0, false) };
  final Query fastMatchFilter;
  final AtomicBoolean filterWasUsed = new AtomicBoolean();
  if (random().nextBoolean()) {
    // Sort of silly:
    final Query in = new MatchAllDocsQuery();
    fastMatchFilter = new UsedQuery(in, filterWasUsed);
  } else {
    fastMatchFilter = null;
  }
  if (VERBOSE) {
    System.out.println("TEST: fastMatchFilter=" + fastMatchFilter);
  }
  Facets facets = new DoubleRangeFacetCounts("field", vs, fc, fastMatchFilter, ranges);
  assertEquals("dim=field path=[] value=3 childCount=6\n < 1 (0)\n < 2 (1)\n < 5 (3)\n < 10 (3)\n < 20 (3)\n < 50 (3)\n", facets.getTopChildren(10, "field").toString());
  assertTrue(fastMatchFilter == null || filterWasUsed.get());
  DrillDownQuery ddq = new DrillDownQuery(config);
  ddq.add("field", ranges[1].getQuery(fastMatchFilter, vs));
  // Test simple drill-down:
  assertEquals(1, s.search(ddq, 10).totalHits);
  // Test drill-sideways after drill-down
  DrillSideways ds = new DrillSideways(s, config, (TaxonomyReader) null) {

    @Override
    protected Facets buildFacetsResult(FacetsCollector drillDowns, FacetsCollector[] drillSideways, String[] drillSidewaysDims) throws IOException {
      assert drillSideways.length == 1;
      return new DoubleRangeFacetCounts("field", vs, drillSideways[0], fastMatchFilter, ranges);
    }

    @Override
    protected boolean scoreSubDocsAtOnce() {
      return random().nextBoolean();
    }
  };
  DrillSidewaysResult dsr = ds.search(ddq, 10);
  assertEquals(1, dsr.hits.totalHits);
  assertEquals("dim=field path=[] value=3 childCount=6\n < 1 (0)\n < 2 (1)\n < 5 (3)\n < 10 (3)\n < 20 (3)\n < 50 (3)\n", dsr.facets.getTopChildren(10, "field").toString());
  writer.close();
  IOUtils.close(r, dir);
}
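The test above feeds DoubleRangeFacetCounts a synthetic per-document value (docId + 1). As a hedged sketch of a more typical use of the LeafReaderContext argument, the same kind of source can pull its values from a NumericDocValuesField on each leaf; the "price" field name below is hypothetical, and in practice a factory method such as DoubleValuesSource.fromLongField would usually cover this simple case without a custom class.

final DoubleValuesSource fromDocValues = new DoubleValuesSource() {

  @Override
  public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
    // Per-leaf lookup: the doc values live on this segment's LeafReader.
    final NumericDocValues dv = DocValues.getNumeric(ctx.reader(), "price");
    return new DoubleValues() {

      @Override
      public double doubleValue() throws IOException {
        return dv.longValue();
      }

      @Override
      public boolean advanceExact(int doc) throws IOException {
        // doc is a segment-local doc id, exactly as in the synthetic source above.
        return dv.advanceExact(doc);
      }
    };
  }

  @Override
  public boolean needsScores() {
    return false;
  }

  @Override
  public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
    // Placeholder explanation for this sketch.
    return Explanation.match(0, "long value of the 'price' doc values field");
  }
};

Such a source plugs into DoubleRangeFacetCounts exactly where vs is used above.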
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
In class AnalyzingInfixSuggester, the method ramBytesUsed:
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        // acquire & release on same SearcherManager, via local reference
        mgr = searcherMgr;
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            mem += ((SegmentReader) reader).ramBytesUsed();
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
In class AnalyzingInfixSuggester, the method getChildResources:
@Override
public Collection<Accountable> getChildResources() {
  List<Accountable> resources = new ArrayList<>();
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        // acquire & release on same SearcherManager, via local reference
        mgr = searcherMgr;
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            resources.add(Accountables.namedAccountable("segment", (SegmentReader) reader));
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return Collections.unmodifiableList(resources);
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
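Both methods above walk the searcher's leaves so that per-segment memory shows up in the suggester's accounting. A hedged usage sketch follows (the dir, analyzer, and dictionary variables are assumed to exist and are not part of the original code): because AnalyzingInfixSuggester is an Accountable, Accountables.toString renders ramBytesUsed() together with the per-segment children produced by getChildResources().

AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(dir, analyzer);
suggester.build(dictionary); // dictionary: a hypothetical org.apache.lucene.search.suggest.Dictionary
System.out.println("total: " + suggester.ramBytesUsed() + " bytes");
// Prints a tree of named accountables, one "segment" entry per SegmentReader leaf:
System.out.println(Accountables.toString(suggester));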
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
In class TestGeo3DPoint, the method verify:
private static void verify(double[] lats, double[] lons) throws Exception {
  IndexWriterConfig iwc = newIndexWriterConfig();
  GeoPoint[] points = new GeoPoint[lats.length];
  GeoPoint[] unquantizedPoints = new GeoPoint[lats.length];
  // Pre-quantize all lat/lons:
  for (int i = 0; i < lats.length; i++) {
    if (Double.isNaN(lats[i]) == false) {
      //System.out.println("lats[" + i + "] = " + lats[i]);
      unquantizedPoints[i] = new GeoPoint(PlanetModel.WGS84, toRadians(lats[i]), toRadians(lons[i]));
      points[i] = quantize(unquantizedPoints[i]);
    }
  }
  // Else we can get O(N^2) merging:
  int mbd = iwc.getMaxBufferedDocs();
  if (mbd != -1 && mbd < points.length / 100) {
    iwc.setMaxBufferedDocs(points.length / 100);
  }
  iwc.setCodec(getCodec());
  Directory dir;
  if (points.length > 100000) {
    dir = newFSDirectory(createTempDir("TestBKDTree"));
  } else {
    dir = getDirectory();
  }
  Set<Integer> deleted = new HashSet<>();
  // RandomIndexWriter is too slow here:
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int id = 0; id < points.length; id++) {
    Document doc = new Document();
    doc.add(newStringField("id", "" + id, Field.Store.NO));
    doc.add(new NumericDocValuesField("id", id));
    GeoPoint point = points[id];
    if (point != null) {
      doc.add(new Geo3DPoint("point", point.x, point.y, point.z));
    }
    w.addDocument(doc);
    if (id > 0 && random().nextInt(100) == 42) {
      int idToDelete = random().nextInt(id);
      w.deleteDocuments(new Term("id", "" + idToDelete));
      deleted.add(idToDelete);
      if (VERBOSE) {
        System.err.println(" delete id=" + idToDelete);
      }
    }
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }
  final IndexReader r = DirectoryReader.open(w);
  if (VERBOSE) {
    System.out.println("TEST: using reader " + r);
  }
  w.close();
  // We can't wrap with "exotic" readers because the geo3d query must see the Geo3DDVFormat:
  IndexSearcher s = newSearcher(r, false);
  final int iters = atLeast(100);
  for (int iter = 0; iter < iters; iter++) {
    /*
    GeoShape shape = randomShape();
    if (VERBOSE) {
      System.err.println("\nTEST: iter=" + iter + " shape="+shape);
    }
    */
    // Geo3DPoint.newShapeQuery("point", shape);
    Query query = random3DQuery("point");
    if (VERBOSE) {
      System.err.println(" using query: " + query);
    }
    final FixedBitSet hits = new FixedBitSet(r.maxDoc());
    s.search(query, new SimpleCollector() {

      private int docBase;

      @Override
      public boolean needsScores() {
        return false;
      }

      @Override
      protected void doSetNextReader(LeafReaderContext context) throws IOException {
        docBase = context.docBase;
      }

      @Override
      public void collect(int doc) {
        hits.set(docBase + doc);
      }
    });
    if (VERBOSE) {
      System.err.println(" hitCount: " + hits.cardinality());
    }
    NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
    for (int docID = 0; docID < r.maxDoc(); docID++) {
      assertEquals(docID, docIDToID.nextDoc());
      int id = (int) docIDToID.longValue();
      GeoPoint point = points[id];
      GeoPoint unquantizedPoint = unquantizedPoints[id];
      if (point != null && unquantizedPoint != null) {
        GeoShape shape = ((PointInGeo3DShapeQuery) query).getShape();
        XYZBounds bounds = new XYZBounds();
        shape.getBounds(bounds);
        XYZSolid solid = XYZSolidFactory.makeXYZSolid(PlanetModel.WGS84, bounds.getMinimumX(), bounds.getMaximumX(), bounds.getMinimumY(), bounds.getMaximumY(), bounds.getMinimumZ(), bounds.getMaximumZ());
        boolean expected = ((deleted.contains(id) == false) && shape.isWithin(point));
        if (hits.get(docID) != expected) {
          StringBuilder b = new StringBuilder();
          if (expected) {
            b.append("FAIL: id=" + id + " should have matched but did not\n");
          } else {
            b.append("FAIL: id=" + id + " should not have matched but did\n");
          }
          b.append(" shape=" + shape + "\n");
          b.append(" bounds=" + bounds + "\n");
          b.append(" world bounds=(" + " minX=" + PlanetModel.WGS84.getMinimumXValue() + " maxX=" + PlanetModel.WGS84.getMaximumXValue() + " minY=" + PlanetModel.WGS84.getMinimumYValue() + " maxY=" + PlanetModel.WGS84.getMaximumYValue() + " minZ=" + PlanetModel.WGS84.getMinimumZValue() + " maxZ=" + PlanetModel.WGS84.getMaximumZValue() + "\n");
          b.append(" quantized point=" + point + " within shape? " + shape.isWithin(point) + " within bounds? " + solid.isWithin(point) + "\n");
          b.append(" unquantized point=" + unquantizedPoint + " within shape? " + shape.isWithin(unquantizedPoint) + " within bounds? " + solid.isWithin(unquantizedPoint) + "\n");
          b.append(" docID=" + docID + " deleted?=" + deleted.contains(id) + "\n");
          b.append(" query=" + query + "\n");
          b.append(" explanation:\n " + explain("point", shape, point, unquantizedPoint, r, docID).replace("\n", "\n "));
          fail(b.toString());
        }
      } else {
        assertFalse(hits.get(docID));
      }
    }
  }
  IOUtils.close(r, dir);
}
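The verification loop above reads the "id" doc values through MultiDocValues against the top-level reader. A hedged per-leaf equivalent, on the theme of this page, uses each LeafReaderContext's docBase to map segment-local doc ids back to the global ids recorded in the hits bitset; it assumes the same "id" NumericDocValuesField indexed above.

for (LeafReaderContext context : r.leaves()) {
  NumericDocValues ids = context.reader().getNumericDocValues("id");
  if (ids == null) {
    continue;
  }
  while (ids.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
    int globalDocID = context.docBase + ids.docID();
    long id = ids.longValue();
    // hits.get(globalDocID) can now be checked against points[(int) id], as in the loop above.
  }
}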
Use of org.apache.lucene.index.LeafReaderContext in project lucene-solr by apache.
In class AnalyzingInfixSuggester, the method createResults:
/**
 * Create the results based on the search hits.
 * Can be overridden by subclass to add particular behavior (e.g. weight transformation).
 * Note that there is no prefix token (the {@code prefixToken} argument will
 * be null) whenever the final token in the incoming request was in fact finished
 * (had trailing characters, such as white-space).
 *
 * @throws IOException If there are problems reading fields from the underlying Lucene index.
 */
protected List<LookupResult> createResults(IndexSearcher searcher, TopFieldDocs hits, int num, CharSequence charSequence, boolean doHighlight, Set<String> matchedTokens, String prefixToken) throws IOException {
  List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
  List<LookupResult> results = new ArrayList<>();
  for (int i = 0; i < hits.scoreDocs.length; i++) {
    FieldDoc fd = (FieldDoc) hits.scoreDocs[i];
    BinaryDocValues textDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), TEXT_FIELD_NAME);
    textDV.advance(fd.doc);
    BytesRef term = textDV.binaryValue();
    String text = term.utf8ToString();
    long score = (Long) fd.fields[0];
    // This will just be null if app didn't pass payloads to build():
    // TODO: maybe just stored fields? they compress...
    BinaryDocValues payloadsDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), "payloads");
    BytesRef payload;
    if (payloadsDV != null) {
      if (payloadsDV.advance(fd.doc) == fd.doc) {
        payload = BytesRef.deepCopyOf(payloadsDV.binaryValue());
      } else {
        payload = new BytesRef(BytesRef.EMPTY_BYTES);
      }
    } else {
      payload = null;
    }
    // Must look up sorted-set by segment:
    int segment = ReaderUtil.subIndex(fd.doc, leaves);
    SortedSetDocValues contextsDV = leaves.get(segment).reader().getSortedSetDocValues(CONTEXTS_FIELD_NAME);
    Set<BytesRef> contexts;
    if (contextsDV != null) {
      contexts = new HashSet<BytesRef>();
      int targetDocID = fd.doc - leaves.get(segment).docBase;
      if (contextsDV.advance(targetDocID) == targetDocID) {
        long ord;
        while ((ord = contextsDV.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
          BytesRef context = BytesRef.deepCopyOf(contextsDV.lookupOrd(ord));
          contexts.add(context);
        }
      }
    } else {
      contexts = null;
    }
    LookupResult result;
    if (doHighlight) {
      result = new LookupResult(text, highlight(text, matchedTokens, prefixToken), score, payload, contexts);
    } else {
      result = new LookupResult(text, score, payload, contexts);
    }
    results.add(result);
  }
  return results;
}
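As the javadoc above notes, createResults can be overridden to apply a weight transformation. A minimal hedged sketch of such a subclass follows; the class name and the fixed 2x boost are illustrative and not part of lucene-solr.

public class BoostedInfixSuggester extends AnalyzingInfixSuggester {

  public BoostedInfixSuggester(Directory dir, Analyzer analyzer) throws IOException {
    super(dir, analyzer);
  }

  @Override
  protected List<LookupResult> createResults(IndexSearcher searcher, TopFieldDocs hits, int num, CharSequence charSequence, boolean doHighlight, Set<String> matchedTokens, String prefixToken) throws IOException {
    List<LookupResult> raw = super.createResults(searcher, hits, num, charSequence, doHighlight, matchedTokens, prefixToken);
    List<LookupResult> boosted = new ArrayList<>(raw.size());
    for (LookupResult r : raw) {
      // Keep key, highlight, payload and contexts; only the weight changes.
      boosted.add(new LookupResult(r.key, r.highlightKey, r.value * 2, r.payload, r.contexts));
    }
    return boosted;
  }
}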