Use of org.apache.lucene.index.PointValues.Relation in project lucene-solr by apache.
In class BasePointsFormatTestCase, method verify().
private void verify(Directory dir, byte[][][] docValues, int[] ids, int numDims, int numBytesPerDim, boolean expectExceptions) throws Exception {
int numValues = docValues.length;
if (VERBOSE) {
System.out.println("TEST: numValues=" + numValues + " numDims=" + numDims + " numBytesPerDim=" + numBytesPerDim);
}
// RandomIndexWriter is too slow for large value counts, so go through the underlying IndexWriter (w.w) directly in that case:
boolean useRealWriter = docValues.length > 10000;
IndexWriterConfig iwc;
if (useRealWriter) {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
} else {
iwc = newIndexWriterConfig();
}
if (expectExceptions) {
MergeScheduler ms = iwc.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
}
}
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
DirectoryReader r = null;
// Compute actual min/max values:
byte[][] expectedMinValues = new byte[numDims][];
byte[][] expectedMaxValues = new byte[numDims][];
for (int ord = 0; ord < docValues.length; ord++) {
for (int dim = 0; dim < numDims; dim++) {
if (ord == 0) {
expectedMinValues[dim] = new byte[numBytesPerDim];
System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
expectedMaxValues[dim] = new byte[numBytesPerDim];
System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim);
} else {
// TODO: it's cheating that we use StringHelper.compare for "truth": what if it's buggy?
if (StringHelper.compare(numBytesPerDim, docValues[ord][dim], 0, expectedMinValues[dim], 0) < 0) {
System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
}
if (StringHelper.compare(numBytesPerDim, docValues[ord][dim], 0, expectedMaxValues[dim], 0) > 0) {
System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim);
}
}
}
}
// 20% of the time we add into a separate directory, then at some point use
// addIndexes to bring the indexed point values to the main directory:
Directory saveDir;
RandomIndexWriter saveW;
int addIndexesAt;
if (random().nextInt(5) == 1) {
saveDir = dir;
saveW = w;
dir = getDirectory(numValues);
if (useRealWriter) {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
} else {
iwc = newIndexWriterConfig();
}
if (expectExceptions) {
MergeScheduler ms = iwc.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
}
}
w = new RandomIndexWriter(random(), dir, iwc);
addIndexesAt = TestUtil.nextInt(random(), 1, numValues - 1);
} else {
saveW = null;
saveDir = null;
addIndexesAt = 0;
}
try {
Document doc = null;
int lastID = -1;
for (int ord = 0; ord < numValues; ord++) {
int id;
if (ids == null) {
id = ord;
} else {
id = ids[ord];
}
if (id != lastID) {
if (doc != null) {
if (useRealWriter) {
w.w.addDocument(doc);
} else {
w.addDocument(doc);
}
}
doc = new Document();
doc.add(new NumericDocValuesField("id", id));
}
doc.add(new BinaryPoint("field", docValues[ord]));
lastID = id;
if (random().nextInt(30) == 17) {
// randomly index some documents without this field
if (useRealWriter) {
w.w.addDocument(new Document());
} else {
w.addDocument(new Document());
}
if (VERBOSE) {
System.out.println("add empty doc");
}
}
if (random().nextInt(30) == 17) {
// randomly index some documents with this field, but we will delete them:
Document xdoc = new Document();
xdoc.add(new BinaryPoint("field", docValues[ord]));
xdoc.add(new StringField("nukeme", "yes", Field.Store.NO));
if (useRealWriter) {
w.w.addDocument(xdoc);
} else {
w.addDocument(xdoc);
}
if (VERBOSE) {
System.out.println("add doc doc-to-delete");
}
if (random().nextInt(5) == 1) {
if (useRealWriter) {
w.w.deleteDocuments(new Term("nukeme", "yes"));
} else {
w.deleteDocuments(new Term("nukeme", "yes"));
}
}
}
if (VERBOSE) {
System.out.println(" ord=" + ord + " id=" + id);
for (int dim = 0; dim < numDims; dim++) {
System.out.println(" dim=" + dim + " value=" + new BytesRef(docValues[ord][dim]));
}
}
if (saveW != null && ord >= addIndexesAt) {
switchIndex(w, dir, saveW);
w = saveW;
dir = saveDir;
saveW = null;
saveDir = null;
}
}
w.addDocument(doc);
w.deleteDocuments(new Term("nukeme", "yes"));
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println("\nTEST: now force merge");
}
w.forceMerge(1);
}
r = w.getReader();
w.close();
if (VERBOSE) {
System.out.println("TEST: reader=" + r);
}
NumericDocValues idValues = MultiDocValues.getNumericValues(r, "id");
int[] docIDToID = new int[r.maxDoc()];
{
int docID;
while ((docID = idValues.nextDoc()) != NO_MORE_DOCS) {
docIDToID[docID] = (int) idValues.longValue();
}
}
Bits liveDocs = MultiFields.getLiveDocs(r);
// Verify min/max values are correct:
byte[] minValues = new byte[numDims * numBytesPerDim];
Arrays.fill(minValues, (byte) 0xff);
byte[] maxValues = new byte[numDims * numBytesPerDim];
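// minValues is seeded with 0xff bytes and maxValues stays at 0x00 (the comparisons below are unsigned),
// so each leaf's packed min/max can only tighten them toward the true global min/max.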
for (LeafReaderContext ctx : r.leaves()) {
PointValues dimValues = ctx.reader().getPointValues("field");
if (dimValues == null) {
continue;
}
byte[] leafMinValues = dimValues.getMinPackedValue();
byte[] leafMaxValues = dimValues.getMaxPackedValue();
for (int dim = 0; dim < numDims; dim++) {
if (StringHelper.compare(numBytesPerDim, leafMinValues, dim * numBytesPerDim, minValues, dim * numBytesPerDim) < 0) {
System.arraycopy(leafMinValues, dim * numBytesPerDim, minValues, dim * numBytesPerDim, numBytesPerDim);
}
if (StringHelper.compare(numBytesPerDim, leafMaxValues, dim * numBytesPerDim, maxValues, dim * numBytesPerDim) > 0) {
System.arraycopy(leafMaxValues, dim * numBytesPerDim, maxValues, dim * numBytesPerDim, numBytesPerDim);
}
}
}
byte[] scratch = new byte[numBytesPerDim];
for (int dim = 0; dim < numDims; dim++) {
System.arraycopy(minValues, dim * numBytesPerDim, scratch, 0, numBytesPerDim);
//System.out.println("dim=" + dim + " expectedMin=" + new BytesRef(expectedMinValues[dim]) + " min=" + new BytesRef(scratch));
assertTrue(Arrays.equals(expectedMinValues[dim], scratch));
System.arraycopy(maxValues, dim * numBytesPerDim, scratch, 0, numBytesPerDim);
//System.out.println("dim=" + dim + " expectedMax=" + new BytesRef(expectedMaxValues[dim]) + " max=" + new BytesRef(scratch));
assertTrue(Arrays.equals(expectedMaxValues[dim], scratch));
}
int iters = atLeast(100);
for (int iter = 0; iter < iters; iter++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter);
}
// Random N dims rect query:
byte[][] queryMin = new byte[numDims][];
byte[][] queryMax = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
queryMin[dim] = new byte[numBytesPerDim];
random().nextBytes(queryMin[dim]);
queryMax[dim] = new byte[numBytesPerDim];
random().nextBytes(queryMax[dim]);
if (StringHelper.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
byte[] x = queryMin[dim];
queryMin[dim] = queryMax[dim];
queryMax[dim] = x;
}
}
if (VERBOSE) {
for (int dim = 0; dim < numDims; dim++) {
System.out.println(" dim=" + dim + "\n queryMin=" + new BytesRef(queryMin[dim]) + "\n queryMax=" + new BytesRef(queryMax[dim]));
}
}
final BitSet hits = new BitSet();
for (LeafReaderContext ctx : r.leaves()) {
PointValues dimValues = ctx.reader().getPointValues("field");
if (dimValues == null) {
continue;
}
final int docBase = ctx.docBase;
dimValues.intersect(new PointValues.IntersectVisitor() {
@Override
public void visit(int docID) {
if (liveDocs == null || liveDocs.get(docBase + docID)) {
hits.set(docIDToID[docBase + docID]);
}
//System.out.println("visit docID=" + docID);
}
@Override
public void visit(int docID, byte[] packedValue) {
if (liveDocs != null && liveDocs.get(docBase + docID) == false) {
return;
}
for (int dim = 0; dim < numDims; dim++) {
//System.out.println(" dim=" + dim + " value=" + new BytesRef(packedValue, dim*numBytesPerDim, numBytesPerDim));
if (StringHelper.compare(numBytesPerDim, packedValue, dim * numBytesPerDim, queryMin[dim], 0) < 0 || StringHelper.compare(numBytesPerDim, packedValue, dim * numBytesPerDim, queryMax[dim], 0) > 0) {
//System.out.println(" no");
return;
}
}
//System.out.println(" yes");
hits.set(docIDToID[docBase + docID]);
}
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
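// Disjoint from the query box in any one dimension makes the whole cell outside; the cell is
// inside only when it is contained in every dimension; anything else crosses.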
//System.out.println("compare");
for (int dim = 0; dim < numDims; dim++) {
if (StringHelper.compare(numBytesPerDim, maxPacked, dim * numBytesPerDim, queryMin[dim], 0) < 0 || StringHelper.compare(numBytesPerDim, minPacked, dim * numBytesPerDim, queryMax[dim], 0) > 0) {
//System.out.println(" query_outside_cell");
return Relation.CELL_OUTSIDE_QUERY;
} else if (StringHelper.compare(numBytesPerDim, minPacked, dim * numBytesPerDim, queryMin[dim], 0) < 0 || StringHelper.compare(numBytesPerDim, maxPacked, dim * numBytesPerDim, queryMax[dim], 0) > 0) {
crosses = true;
}
}
if (crosses) {
//System.out.println(" query_crosses_cell");
return Relation.CELL_CROSSES_QUERY;
} else {
//System.out.println(" cell_inside_query");
return Relation.CELL_INSIDE_QUERY;
}
}
});
}
BitSet expected = new BitSet();
for (int ord = 0; ord < numValues; ord++) {
boolean matches = true;
for (int dim = 0; dim < numDims; dim++) {
byte[] x = docValues[ord][dim];
if (StringHelper.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 || StringHelper.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
matches = false;
break;
}
}
if (matches) {
int id;
if (ids == null) {
id = ord;
} else {
id = ids[ord];
}
expected.set(id);
}
}
int limit = Math.max(expected.length(), hits.length());
int failCount = 0;
int successCount = 0;
for (int id = 0; id < limit; id++) {
if (expected.get(id) != hits.get(id)) {
System.out.println("FAIL: id=" + id);
failCount++;
} else {
successCount++;
}
}
if (failCount != 0) {
for (int docID = 0; docID < r.maxDoc(); docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
}
fail(failCount + " docs failed; " + successCount + " docs succeeded");
}
}
} finally {
IOUtils.closeWhileHandlingException(r, w, saveW, saveDir == null ? null : dir);
}
}
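The visitor above compresses to a small generic helper. Below is a minimal sketch (not part of the test suite) of the same Relation contract: collect the docIDs whose single-dimension int point falls inside [lo, hi]. The helper name collectInRange, the field name parameter, and the decision to ignore deletions are illustrative assumptions; the Lucene classes are the same ones the tests on this page already use.
static BitSet collectInRange(IndexReader reader, String field, int lo, int hi) throws IOException {
  byte[] min = new byte[Integer.BYTES];
  byte[] max = new byte[Integer.BYTES];
  NumericUtils.intToSortableBytes(lo, min, 0);
  NumericUtils.intToSortableBytes(hi, max, 0);
  BitSet hits = new BitSet();
  for (LeafReaderContext ctx : reader.leaves()) {
    PointValues values = ctx.reader().getPointValues(field);
    if (values == null) {
      // this segment has no points for the field
      continue;
    }
    int docBase = ctx.docBase;
    values.intersect(new PointValues.IntersectVisitor() {
      @Override
      public void visit(int docID) {
        // compare() returned CELL_INSIDE_QUERY: every doc in the cell matches, no value check needed
        hits.set(docBase + docID);
      }
      @Override
      public void visit(int docID, byte[] packedValue) {
        // compare() returned CELL_CROSSES_QUERY: check each value against the range
        if (StringHelper.compare(Integer.BYTES, packedValue, 0, min, 0) >= 0 &&
            StringHelper.compare(Integer.BYTES, packedValue, 0, max, 0) <= 0) {
          hits.set(docBase + docID);
        }
      }
      @Override
      public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
        if (StringHelper.compare(Integer.BYTES, maxPackedValue, 0, min, 0) < 0 ||
            StringHelper.compare(Integer.BYTES, minPackedValue, 0, max, 0) > 0) {
          // the cell cannot contain a matching value: prune the whole subtree
          return Relation.CELL_OUTSIDE_QUERY;
        }
        if (StringHelper.compare(Integer.BYTES, minPackedValue, 0, min, 0) >= 0 &&
            StringHelper.compare(Integer.BYTES, maxPackedValue, 0, max, 0) <= 0) {
          // the cell lies entirely inside [lo, hi]: accept every doc it holds
          return Relation.CELL_INSIDE_QUERY;
        }
        // partial overlap: values must be checked one by one
        return Relation.CELL_CROSSES_QUERY;
      }
    });
  }
  return hits;
}
With a multi-dimensional field the same shape applies; each dimension contributes its own slice of the packed bytes, exactly as the verify() visitor above checks dim by dim.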
Use of org.apache.lucene.index.PointValues.Relation in project lucene-solr by apache.
In class BasePointsFormatTestCase, method testBigIntNDims().
// Tests on N-dimensional points where each dimension is a BigInteger
public void testBigIntNDims() throws Exception {
int numDocs = atLeast(1000);
try (Directory dir = getDirectory(numDocs)) {
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
// We rely on docIDs not changing:
iwc.setMergePolicy(newLogMergePolicy());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
BigInteger[][] docs = new BigInteger[numDocs][];
for (int docID = 0; docID < numDocs; docID++) {
BigInteger[] values = new BigInteger[numDims];
if (VERBOSE) {
System.out.println(" docID=" + docID);
}
byte[][] bytes = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
values[dim] = randomBigInt(numBytesPerDim);
bytes[dim] = new byte[numBytesPerDim];
NumericUtils.bigIntToSortableBytes(values[dim], numBytesPerDim, bytes[dim], 0);
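// bigIntToSortableBytes encodes each value so that unsigned byte-by-byte order matches BigInteger order;
// the index compares the raw bytes while the visitor below decodes them back with sortableBytesToBigInt.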
if (VERBOSE) {
System.out.println(" " + dim + " -> " + values[dim]);
}
}
docs[docID] = values;
Document doc = new Document();
doc.add(new BinaryPoint("field", bytes));
w.addDocument(doc);
}
DirectoryReader r = w.getReader();
w.close();
int iters = atLeast(100);
for (int iter = 0; iter < iters; iter++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter);
}
// Random N dims rect query:
BigInteger[] queryMin = new BigInteger[numDims];
BigInteger[] queryMax = new BigInteger[numDims];
for (int dim = 0; dim < numDims; dim++) {
queryMin[dim] = randomBigInt(numBytesPerDim);
queryMax[dim] = randomBigInt(numBytesPerDim);
if (queryMin[dim].compareTo(queryMax[dim]) > 0) {
BigInteger x = queryMin[dim];
queryMin[dim] = queryMax[dim];
queryMax[dim] = x;
}
if (VERBOSE) {
System.out.println(" " + dim + "\n min=" + queryMin[dim] + "\n max=" + queryMax[dim]);
}
}
final BitSet hits = new BitSet();
for (LeafReaderContext ctx : r.leaves()) {
PointValues dimValues = ctx.reader().getPointValues("field");
if (dimValues == null) {
continue;
}
final int docBase = ctx.docBase;
dimValues.intersect(new IntersectVisitor() {
@Override
public void visit(int docID) {
hits.set(docBase + docID);
//System.out.println("visit docID=" + docID);
}
@Override
public void visit(int docID, byte[] packedValue) {
//System.out.println("visit check docID=" + docID);
for (int dim = 0; dim < numDims; dim++) {
BigInteger x = NumericUtils.sortableBytesToBigInt(packedValue, dim * numBytesPerDim, numBytesPerDim);
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
//System.out.println(" no");
return;
}
}
//System.out.println(" yes");
hits.set(docBase + docID);
}
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
for (int dim = 0; dim < numDims; dim++) {
BigInteger min = NumericUtils.sortableBytesToBigInt(minPacked, dim * numBytesPerDim, numBytesPerDim);
BigInteger max = NumericUtils.sortableBytesToBigInt(maxPacked, dim * numBytesPerDim, numBytesPerDim);
assert max.compareTo(min) >= 0;
if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
return Relation.CELL_OUTSIDE_QUERY;
} else if (min.compareTo(queryMin[dim]) < 0 || max.compareTo(queryMax[dim]) > 0) {
crosses = true;
}
}
if (crosses) {
return Relation.CELL_CROSSES_QUERY;
} else {
return Relation.CELL_INSIDE_QUERY;
}
}
});
}
for (int docID = 0; docID < numDocs; docID++) {
BigInteger[] docValues = docs[docID];
boolean expected = true;
for (int dim = 0; dim < numDims; dim++) {
BigInteger x = docValues[dim];
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
expected = false;
break;
}
}
boolean actual = hits.get(docID);
assertEquals("docID=" + docID, expected, actual);
}
}
r.close();
}
}
Use of org.apache.lucene.index.PointValues.Relation in project lucene-solr by apache.
In class BasePointsFormatTestCase, method testAllPointDocsDeletedInSegment().
public void testAllPointDocsDeletedInSegment() throws Exception {
Directory dir = getDirectory(20);
IndexWriterConfig iwc = newIndexWriterConfig();
IndexWriter w = new IndexWriter(dir, iwc);
byte[] point = new byte[4];
for (int i = 0; i < 10; i++) {
Document doc = new Document();
NumericUtils.intToSortableBytes(i, point, 0);
doc.add(new BinaryPoint("dim", point));
doc.add(new NumericDocValuesField("id", i));
doc.add(newStringField("x", "x", Field.Store.NO));
w.addDocument(doc);
}
w.addDocument(new Document());
w.deleteDocuments(new Term("x", "x"));
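// The empty document has no "x" field, so it survives the delete; every document that carried
// points is now deleted, leaving the points-bearing segments 100% deleted.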
if (random().nextBoolean()) {
w.forceMerge(1);
}
w.close();
DirectoryReader r = DirectoryReader.open(dir);
assertEquals(1, r.numDocs());
Bits liveDocs = MultiFields.getLiveDocs(r);
for (LeafReaderContext ctx : r.leaves()) {
PointValues values = ctx.reader().getPointValues("dim");
NumericDocValues idValues = ctx.reader().getNumericDocValues("id");
if (idValues == null) {
// 100%-deleted segments get dropped, and the final single-doc segment (the empty document) never has an "id" field, so idValues can be null here
continue;
}
int[] docIDToID = new int[ctx.reader().maxDoc()];
int docID;
while ((docID = idValues.nextDoc()) != NO_MORE_DOCS) {
docIDToID[docID] = (int) idValues.longValue();
}
if (values != null) {
BitSet seen = new BitSet();
values.intersect(new IntersectVisitor() {
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
return Relation.CELL_CROSSES_QUERY;
}
@Override
public void visit(int docID) {
throw new IllegalStateException();
}
@Override
public void visit(int docID, byte[] packedValue) {
if (liveDocs.get(docID)) {
seen.set(docID);
}
assertEquals(docIDToID[docID], NumericUtils.sortableBytesToInt(packedValue, 0));
}
});
assertEquals(0, seen.cardinality());
}
}
IOUtils.close(r, dir);
}
Use of org.apache.lucene.index.PointValues.Relation in project lucene-solr by apache.
In class TestLucene60PointsFormat, method testEstimatePointCount2Dims().
// With more than one dimension the tree is always balanced and leaves are not all full,
// so the point-count estimates work out a bit differently than in the 1D case
public void testEstimatePointCount2Dims() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
byte[][] pointValue = new byte[2][];
pointValue[0] = new byte[3];
pointValue[1] = new byte[3];
byte[][] uniquePointValue = new byte[2][];
uniquePointValue[0] = new byte[3];
uniquePointValue[1] = new byte[3];
random().nextBytes(uniquePointValue[0]);
random().nextBytes(uniquePointValue[1]);
// make sure we have several leaves
final int numDocs = atLeast(10000);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
if (i == numDocs / 2) {
doc.add(new BinaryPoint("f", uniquePointValue));
} else {
do {
random().nextBytes(pointValue[0]);
random().nextBytes(pointValue[1]);
} while (Arrays.equals(pointValue[0], uniquePointValue[0]) || Arrays.equals(pointValue[1], uniquePointValue[1]));
doc.add(new BinaryPoint("f", pointValue));
}
w.addDocument(doc);
}
w.forceMerge(1);
final IndexReader r = DirectoryReader.open(w);
w.close();
final LeafReader lr = getOnlyLeafReader(r);
PointValues points = lr.getPointValues("f");
// With >1 dims, the tree is balanced
int actualMaxPointsInLeafNode = numDocs;
while (actualMaxPointsInLeafNode > maxPointsInLeafNode) {
actualMaxPointsInLeafNode = (actualMaxPointsInLeafNode + 1) / 2;
}
// If all points match, then the estimated point count is numLeaves * actualMaxPointsInLeafNode
final int numLeaves = Integer.highestOneBit((numDocs - 1) / actualMaxPointsInLeafNode) << 1;
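// Worked example for the two computations above, with hypothetical numbers (not this test's randomized values):
// numDocs = 10347, maxPointsInLeafNode = 1024
//   halving: 10347 -> 5174 -> 2587 -> 1294 -> 647, so actualMaxPointsInLeafNode = 647
//   (numDocs - 1) / 647 = 15, Integer.highestOneBit(15) = 8, 8 << 1 = 16 leaves
//   16 * 647 = 10352, slightly above numDocs because the leaves are not all full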
assertEquals(numLeaves * actualMaxPointsInLeafNode, points.estimatePointCount(new IntersectVisitor() {
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
}
@Override
public void visit(int docID) throws IOException {
}
@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
return Relation.CELL_INSIDE_QUERY;
}
}));
// Return 0 if no points match
assertEquals(0, points.estimatePointCount(new IntersectVisitor() {
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
}
@Override
public void visit(int docID) throws IOException {
}
@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
return Relation.CELL_OUTSIDE_QUERY;
}
}));
// If only one point matches, then the point count is (actualMaxPointsInLeafNode + 1) / 2
// in general, or maybe 2x that if the point is a split value
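// (Reading of the expectation, not from the test itself: the visitor below only crosses the leaf
// containing uniquePointValue, and estimatePointCount charges roughly half a leaf's points per
// crossed leaf, hence (actualMaxPointsInLeafNode + 1) / 2; if the value sits on a split, two
// adjacent leaves cross and the estimate doubles.)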
final long pointCount = points.estimatePointCount(new IntersectVisitor() {
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
}
@Override
public void visit(int docID) throws IOException {
}
@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
for (int dim = 0; dim < 2; ++dim) {
if (StringHelper.compare(3, uniquePointValue[dim], 0, maxPackedValue, dim * 3) > 0 || StringHelper.compare(3, uniquePointValue[dim], 0, minPackedValue, dim * 3) < 0) {
return Relation.CELL_OUTSIDE_QUERY;
}
}
return Relation.CELL_CROSSES_QUERY;
}
});
assertTrue("" + pointCount, // common case
pointCount == (actualMaxPointsInLeafNode + 1) / 2 || // if the point is a split value
pointCount == 2 * ((actualMaxPointsInLeafNode + 1) / 2));
r.close();
dir.close();
}
Use of org.apache.lucene.index.PointValues.Relation in project lucene-solr by apache.
In class TestBKD, method testBasicInts1D().
public void testBasicInts1D() throws Exception {
try (Directory dir = getDirectory(100)) {
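// Best-effort annotation of the BKDWriter arguments below (order per the BKDWriter constructor of this
// Lucene version; treat as an assumption rather than authoritative documentation): maxDoc=100, temp dir,
// temp file prefix "tmp", numDims=1, bytesPerDim=4, maxPointsInLeafNode=2, maxMBSortInHeap=1.0,
// totalPointCount=100, singleValuePerDoc=true.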
BKDWriter w = new BKDWriter(100, dir, "tmp", 1, 4, 2, 1.0f, 100, true);
byte[] scratch = new byte[4];
for (int docID = 0; docID < 100; docID++) {
NumericUtils.intToSortableBytes(docID, scratch, 0);
w.add(scratch, docID);
}
long indexFP;
try (IndexOutput out = dir.createOutput("bkd", IOContext.DEFAULT)) {
indexFP = w.finish(out);
}
try (IndexInput in = dir.openInput("bkd", IOContext.DEFAULT)) {
in.seek(indexFP);
BKDReader r = new BKDReader(in);
// Simple 1D range query:
final int queryMin = 42;
final int queryMax = 87;
final BitSet hits = new BitSet();
r.intersect(new IntersectVisitor() {
@Override
public void visit(int docID) {
hits.set(docID);
if (VERBOSE) {
System.out.println("visit docID=" + docID);
}
}
@Override
public void visit(int docID, byte[] packedValue) {
int x = NumericUtils.sortableBytesToInt(packedValue, 0);
if (VERBOSE) {
System.out.println("visit docID=" + docID + " x=" + x);
}
if (x >= queryMin && x <= queryMax) {
hits.set(docID);
}
}
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
int min = NumericUtils.sortableBytesToInt(minPacked, 0);
int max = NumericUtils.sortableBytesToInt(maxPacked, 0);
assert max >= min;
if (VERBOSE) {
System.out.println("compare: min=" + min + " max=" + max + " vs queryMin=" + queryMin + " queryMax=" + queryMax);
}
if (max < queryMin || min > queryMax) {
return Relation.CELL_OUTSIDE_QUERY;
} else if (min >= queryMin && max <= queryMax) {
return Relation.CELL_INSIDE_QUERY;
} else {
return Relation.CELL_CROSSES_QUERY;
}
}
});
for (int docID = 0; docID < 100; docID++) {
boolean expected = docID >= queryMin && docID <= queryMax;
boolean actual = hits.get(docID);
assertEquals("docID=" + docID, expected, actual);
}
}
}
}