use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
In the class TestMemoryIndexAgainstRAMDir, the method testPointValuesMemoryIndexVsNormalIndex:
public void testPointValuesMemoryIndexVsNormalIndex() throws Exception {
  int size = atLeast(12);
  List<Integer> randomValues = new ArrayList<>();
  Document doc = new Document();
  for (Integer randomInteger : random().ints(size).toArray()) {
    doc.add(new IntPoint("int", randomInteger));
    randomValues.add(randomInteger);
    doc.add(new LongPoint("long", randomInteger));
    doc.add(new FloatPoint("float", randomInteger));
    doc.add(new DoublePoint("double", randomInteger));
  }
  MockAnalyzer mockAnalyzer = new MockAnalyzer(random());
  MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc, mockAnalyzer);
  IndexSearcher memoryIndexSearcher = memoryIndex.createSearcher();
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), mockAnalyzer));
  writer.addDocument(doc);
  writer.close();
  IndexReader controlIndexReader = DirectoryReader.open(dir);
  IndexSearcher controlIndexSearcher = new IndexSearcher(controlIndexReader);
  Supplier<Integer> valueSupplier = () -> randomValues.get(random().nextInt(randomValues.size()));
  Query[] queries = new Query[] {
      IntPoint.newExactQuery("int", valueSupplier.get()),
      LongPoint.newExactQuery("long", valueSupplier.get()),
      FloatPoint.newExactQuery("float", valueSupplier.get()),
      DoublePoint.newExactQuery("double", valueSupplier.get()),
      IntPoint.newSetQuery("int", valueSupplier.get(), valueSupplier.get()),
      LongPoint.newSetQuery("long", valueSupplier.get(), valueSupplier.get()),
      FloatPoint.newSetQuery("float", valueSupplier.get(), valueSupplier.get()),
      DoublePoint.newSetQuery("double", valueSupplier.get(), valueSupplier.get()),
      IntPoint.newRangeQuery("int", valueSupplier.get(), valueSupplier.get()),
      LongPoint.newRangeQuery("long", valueSupplier.get(), valueSupplier.get()),
      FloatPoint.newRangeQuery("float", valueSupplier.get(), valueSupplier.get()),
      DoublePoint.newRangeQuery("double", valueSupplier.get(), valueSupplier.get())
  };
  for (Query query : queries) {
    // Compare the hit count from the MemoryIndex against the control index.
    assertEquals(controlIndexSearcher.count(query), memoryIndexSearcher.count(query));
  }
  memoryIndexSearcher.getIndexReader().close();
  controlIndexReader.close();
  dir.close();
}
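For readers who want to try the same cross-check outside the Lucene test framework, here is a minimal standalone sketch. It assumes StandardAnalyzer and RAMDirectory as stand-ins for the test helpers MockAnalyzer and newDirectory(), and indexes a single hard-coded value instead of random ones; the MemoryIndex and the Directory-backed control index should report the same hit count.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DoublePointCrossCheck {
  public static void main(String[] args) throws Exception {
    StandardAnalyzer analyzer = new StandardAnalyzer();
    Document doc = new Document();
    doc.add(new DoublePoint("double", 42.0));

    // Index the document into a MemoryIndex ...
    MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc, analyzer);
    IndexSearcher memorySearcher = memoryIndex.createSearcher();

    // ... and into a regular Directory-backed index as the control.
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
    writer.addDocument(doc);
    writer.close();

    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher controlSearcher = new IndexSearcher(reader);
      Query query = DoublePoint.newExactQuery("double", 42.0);
      // Both searchers see the same single matching document.
      System.out.println(memorySearcher.count(query) + " == " + controlSearcher.count(query));
    }
    dir.close();
  }
}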
use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
In the class TestMemoryIndex, the method testPointValues:
public void testPointValues() throws Exception {
  List<Function<Long, IndexableField>> fieldFunctions = Arrays.asList(
      (t) -> new IntPoint("number", t.intValue()),
      (t) -> new LongPoint("number", t),
      (t) -> new FloatPoint("number", t.floatValue()),
      (t) -> new DoublePoint("number", t.doubleValue()));
  List<Function<Long, Query>> exactQueryFunctions = Arrays.asList(
      (t) -> IntPoint.newExactQuery("number", t.intValue()),
      (t) -> LongPoint.newExactQuery("number", t),
      (t) -> FloatPoint.newExactQuery("number", t.floatValue()),
      (t) -> DoublePoint.newExactQuery("number", t.doubleValue()));
  List<Function<long[], Query>> setQueryFunctions = Arrays.asList(
      (t) -> IntPoint.newSetQuery("number", LongStream.of(t).mapToInt(value -> (int) value).toArray()),
      (t) -> LongPoint.newSetQuery("number", t),
      (t) -> FloatPoint.newSetQuery("number", Arrays.asList(LongStream.of(t).mapToObj(value -> (float) value).toArray(Float[]::new))),
      (t) -> DoublePoint.newSetQuery("number", LongStream.of(t).mapToDouble(value -> (double) value).toArray()));
  List<BiFunction<Long, Long, Query>> rangeQueryFunctions = Arrays.asList(
      (t, u) -> IntPoint.newRangeQuery("number", t.intValue(), u.intValue()),
      (t, u) -> LongPoint.newRangeQuery("number", t, u),
      (t, u) -> FloatPoint.newRangeQuery("number", t.floatValue(), u.floatValue()),
      (t, u) -> DoublePoint.newRangeQuery("number", t.doubleValue(), u.doubleValue()));
  for (int i = 0; i < fieldFunctions.size(); i++) {
    Function<Long, IndexableField> fieldFunction = fieldFunctions.get(i);
    Function<Long, Query> exactQueryFunction = exactQueryFunctions.get(i);
    Function<long[], Query> setQueryFunction = setQueryFunctions.get(i);
    BiFunction<Long, Long, Query> rangeQueryFunction = rangeQueryFunctions.get(i);
    Document doc = new Document();
    // Index the odd numbers 1, 3, 5, ..., 31 as single-dimension point values.
    for (int number = 1; number < 32; number += 2) {
      doc.add(fieldFunction.apply((long) number));
    }
    MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer);
    IndexSearcher indexSearcher = mi.createSearcher();
    Query query = exactQueryFunction.apply(5L);
    assertEquals(1, indexSearcher.count(query));
    query = exactQueryFunction.apply(4L);
    assertEquals(0, indexSearcher.count(query));
    query = setQueryFunction.apply(new long[] { 3L, 9L, 19L });
    assertEquals(1, indexSearcher.count(query));
    query = setQueryFunction.apply(new long[] { 2L, 8L, 13L });
    assertEquals(1, indexSearcher.count(query));
    query = setQueryFunction.apply(new long[] { 2L, 8L, 16L });
    assertEquals(0, indexSearcher.count(query));
    query = rangeQueryFunction.apply(2L, 16L);
    assertEquals(1, indexSearcher.count(query));
    query = rangeQueryFunction.apply(24L, 48L);
    assertEquals(1, indexSearcher.count(query));
    query = rangeQueryFunction.apply(48L, 68L);
    assertEquals(0, indexSearcher.count(query));
  }
}
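Because the Function indirection can obscure what each arm actually does, here is a reduced sketch of just the DoublePoint case, intended to sit inside a JUnit test. The field name "price" and the StandardAnalyzer are illustrative substitutions for the test's "number" field and its analyzer; the expected counts follow from the fact that a MemoryIndex holds exactly one document and only the odd values 1..31 are indexed.

Document doc = new Document();
for (int number = 1; number < 32; number += 2) {
  doc.add(new DoublePoint("price", number)); // odd values 1, 3, ..., 31
}
MemoryIndex mi = MemoryIndex.fromDocument(doc, new StandardAnalyzer());
IndexSearcher searcher = mi.createSearcher();

// Exact query: 5.0 was indexed, 4.0 was not.
assertEquals(1, searcher.count(DoublePoint.newExactQuery("price", 5.0)));
assertEquals(0, searcher.count(DoublePoint.newExactQuery("price", 4.0)));

// Set query: the single document matches as soon as any listed value was indexed (13 is).
assertEquals(1, searcher.count(DoublePoint.newSetQuery("price", 2.0, 8.0, 13.0)));

// Range query: bounds are inclusive; the document is counted once even though
// several indexed values (25, 27, 29, 31) fall inside the range.
assertEquals(1, searcher.count(DoublePoint.newRangeQuery("price", 24.0, 48.0)));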
use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
In the class TestMemoryIndex, the method test2DPoints:
public void test2DPoints() throws Exception {
  Document doc = new Document();
  doc.add(new IntPoint("ints", 0, -100));
  doc.add(new IntPoint("ints", 20, 20));
  doc.add(new IntPoint("ints", 100, -100));
  doc.add(new LongPoint("longs", 0L, -100L));
  doc.add(new LongPoint("longs", 20L, 20L));
  doc.add(new LongPoint("longs", 100L, -100L));
  doc.add(new FloatPoint("floats", 0F, -100F));
  doc.add(new FloatPoint("floats", 20F, 20F));
  doc.add(new FloatPoint("floats", 100F, -100F));
  doc.add(new DoublePoint("doubles", 0D, -100D));
  doc.add(new DoublePoint("doubles", 20D, 20D));
  doc.add(new DoublePoint("doubles", 100D, -100D));
  MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer);
  IndexSearcher s = mi.createSearcher();
  assertEquals(1, s.count(IntPoint.newRangeQuery("ints", new int[] { 10, 10 }, new int[] { 30, 30 })));
  assertEquals(1, s.count(LongPoint.newRangeQuery("longs", new long[] { 10L, 10L }, new long[] { 30L, 30L })));
  assertEquals(1, s.count(FloatPoint.newRangeQuery("floats", new float[] { 10F, 10F }, new float[] { 30F, 30F })));
  assertEquals(1, s.count(DoublePoint.newRangeQuery("doubles", new double[] { 10D, 10D }, new double[] { 30D, 30D })));
}
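As a complement, a minimal sketch of just the two-dimensional DoublePoint case: every value added to a point field must use the same number of dimensions, and the multi-dimensional newRangeQuery overload takes one lower and one upper bound per dimension. The field name "location" and the single point are made up for illustration.

Document doc = new Document();
doc.add(new DoublePoint("location", 20D, 20D)); // a single 2-D point (x=20, y=20)
MemoryIndex mi = MemoryIndex.fromDocument(doc, new StandardAnalyzer());
IndexSearcher s = mi.createSearcher();

// Matches: 10 <= x <= 30 and 10 <= y <= 30 both hold for (20, 20).
Query inside = DoublePoint.newRangeQuery("location",
    new double[] { 10D, 10D }, new double[] { 30D, 30D });
// Misses: the x dimension (20) lies outside [40, 60].
Query outside = DoublePoint.newRangeQuery("location",
    new double[] { 40D, 10D }, new double[] { 60D, 30D });
assertEquals(1, s.count(inside));
assertEquals(0, s.count(outside));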
use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
In the class BBoxStrategy, the method createIndexableFields:
private Field[] createIndexableFields(Rectangle bbox) {
  Field[] fields = new Field[fieldsLen];
  int idx = -1;
  if (hasStored) {
    fields[++idx] = new StoredField(field_minX, bbox.getMinX());
    fields[++idx] = new StoredField(field_minY, bbox.getMinY());
    fields[++idx] = new StoredField(field_maxX, bbox.getMaxX());
    fields[++idx] = new StoredField(field_maxY, bbox.getMaxY());
  }
  if (hasDocVals) {
    fields[++idx] = new DoubleDocValuesField(field_minX, bbox.getMinX());
    fields[++idx] = new DoubleDocValuesField(field_minY, bbox.getMinY());
    fields[++idx] = new DoubleDocValuesField(field_maxX, bbox.getMaxX());
    fields[++idx] = new DoubleDocValuesField(field_maxY, bbox.getMaxY());
  }
  if (hasPointVals) {
    fields[++idx] = new DoublePoint(field_minX, bbox.getMinX());
    fields[++idx] = new DoublePoint(field_minY, bbox.getMinY());
    fields[++idx] = new DoublePoint(field_maxX, bbox.getMaxX());
    fields[++idx] = new DoublePoint(field_maxY, bbox.getMaxY());
  }
  if (xdlFieldType != null) {
    fields[++idx] = new Field(field_xdl, bbox.getCrossesDateLine() ? "T" : "F", xdlFieldType);
  }
  assert idx == fields.length - 1;
  return fields;
}
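When hasPointVals is set, each bounding-box edge goes into its own single-dimension DoublePoint field, which the strategy can later target with range queries. The sketch below shows, purely as an assumption-laden illustration, how such edge fields could be combined into a basic "intersects" check with a BooleanQuery: the field names "bbox__minX" etc. are made up, dateline crossing is ignored, and the real BBoxStrategy assembles its own queries internally via its makeQuery path.

// Rectangle-intersection test expressed as four open-ended DoublePoint ranges:
// docMinX <= qMaxX, docMaxX >= qMinX, docMinY <= qMaxY, docMaxY >= qMinY.
double qMinX = -10, qMaxX = 10, qMinY = -5, qMaxY = 5;
Query intersects = new BooleanQuery.Builder()
    .add(DoublePoint.newRangeQuery("bbox__minX", Double.NEGATIVE_INFINITY, qMaxX), BooleanClause.Occur.FILTER)
    .add(DoublePoint.newRangeQuery("bbox__maxX", qMinX, Double.POSITIVE_INFINITY), BooleanClause.Occur.FILTER)
    .add(DoublePoint.newRangeQuery("bbox__minY", Double.NEGATIVE_INFINITY, qMaxY), BooleanClause.Occur.FILTER)
    .add(DoublePoint.newRangeQuery("bbox__maxY", qMinY, Double.POSITIVE_INFINITY), BooleanClause.Occur.FILTER)
    .build();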
use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
In the class BaseTestRangeFilter, the method build:
private static IndexReader build(Random random, TestIndex index) throws IOException {
  /* build an index */
  Document doc = new Document();
  Field idField = newStringField(random, "id", "", Field.Store.YES);
  Field idDVField = new SortedDocValuesField("id", new BytesRef());
  Field intIdField = new IntPoint("id_int", 0);
  Field intDVField = new NumericDocValuesField("id_int", 0);
  Field floatIdField = new FloatPoint("id_float", 0);
  Field floatDVField = new NumericDocValuesField("id_float", 0);
  Field longIdField = new LongPoint("id_long", 0);
  Field longDVField = new NumericDocValuesField("id_long", 0);
  Field doubleIdField = new DoublePoint("id_double", 0);
  Field doubleDVField = new NumericDocValuesField("id_double", 0);
  Field randField = newStringField(random, "rand", "", Field.Store.YES);
  Field randDVField = new SortedDocValuesField("rand", new BytesRef());
  Field bodyField = newStringField(random, "body", "", Field.Store.NO);
  Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
  doc.add(idField);
  doc.add(idDVField);
  doc.add(intIdField);
  doc.add(intDVField);
  doc.add(floatIdField);
  doc.add(floatDVField);
  doc.add(longIdField);
  doc.add(longDVField);
  doc.add(doubleIdField);
  doc.add(doubleDVField);
  doc.add(randField);
  doc.add(randDVField);
  doc.add(bodyField);
  doc.add(bodyDVField);
  RandomIndexWriter writer = new RandomIndexWriter(random, index.index,
      newIndexWriterConfig(random, new MockAnalyzer(random))
          .setOpenMode(OpenMode.CREATE)
          .setMaxBufferedDocs(TestUtil.nextInt(random, 50, 1000))
          .setMergePolicy(newLogMergePolicy()));
  TestUtil.reduceOpenFiles(writer.w);
  while (true) {
    int minCount = 0;
    int maxCount = 0;
    for (int d = minId; d <= maxId; d++) {
      idField.setStringValue(pad(d));
      idDVField.setBytesValue(new BytesRef(pad(d)));
      intIdField.setIntValue(d);
      intDVField.setLongValue(d);
      floatIdField.setFloatValue(d);
      floatDVField.setLongValue(Float.floatToRawIntBits(d));
      longIdField.setLongValue(d);
      longDVField.setLongValue(d);
      doubleIdField.setDoubleValue(d);
      doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
      int r = index.allowNegativeRandomInts ? random.nextInt() : random.nextInt(Integer.MAX_VALUE);
      if (index.maxR < r) {
        index.maxR = r;
        maxCount = 1;
      } else if (index.maxR == r) {
        maxCount++;
      }
      if (r < index.minR) {
        index.minR = r;
        minCount = 1;
      } else if (r == index.minR) {
        minCount++;
      }
      randField.setStringValue(pad(r));
      randDVField.setBytesValue(new BytesRef(pad(r)));
      bodyField.setStringValue("body");
      bodyDVField.setBytesValue(new BytesRef("body"));
      writer.addDocument(doc);
    }
    if (minCount == 1 && maxCount == 1) {
      // our subclasses rely on only 1 doc having the min or
      // max, so we loop until we satisfy that. it should be
      // exceedingly rare (Yonik calculates 1 in ~429,000 times)
      // that this loop requires more than one try:
      IndexReader ir = writer.getReader();
      writer.close();
      return ir;
    }
    // try again
    writer.deleteAll();
  }
}
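Since build indexes every id d between minId and maxId as, among other fields, a single-dimension DoublePoint under "id_double", the returned reader can be sanity-checked with a plain range query. A small sketch, assuming the minId/maxId bounds defined by BaseTestRangeFilter and a reader obtained from build():

IndexSearcher searcher = new IndexSearcher(reader);
// Exclude the two endpoint ids; newRangeQuery bounds are inclusive.
Query q = DoublePoint.newRangeQuery("id_double", minId + 1, maxId - 1);
// Every remaining id matches exactly once.
assertEquals(maxId - minId - 1, searcher.count(q));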