Use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
The class TestFieldCacheSort, method testDoubleSignedZero.
/** Tests sorting on type double with +/- zero */
public void testDoubleSignedZero() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoublePoint("value", +0d));
doc.add(new StoredField("value", +0d));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", -0d));
doc.add(new StoredField("value", -0d));
writer.addDocument(doc);
doc = new Document();
IndexReader ir = UninvertingReader.wrap(writer.getReader(), Collections.singletonMap("value", Type.DOUBLE_POINT));
writer.close();
IndexSearcher searcher = newSearcher(ir, false);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// numeric order
double v0 = searcher.doc(td.scoreDocs[0].doc).getField("value").numericValue().doubleValue();
double v1 = searcher.doc(td.scoreDocs[1].doc).getField("value").numericValue().doubleValue();
assertEquals(0, v0, 0d);
assertEquals(0, v1, 0d);
// check sign bits
assertEquals(1, Double.doubleToLongBits(v0) >>> 63);
assertEquals(0, Double.doubleToLongBits(v1) >>> 63);
TestUtil.checkReader(ir);
ir.close();
dir.close();
}
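A minimal sketch of why the sign-bit check above works (illustration only, not part of the test): +0.0d and -0.0d compare equal with ==, but Double.doubleToLongBits preserves the sign bit, and Double.compare orders -0.0 before +0.0, which is the order the sort produces.
// -0.0 and +0.0 have distinct bit patterns even though (+0.0d == -0.0d) is true
long minusZeroBits = Double.doubleToLongBits(-0.0d); // 0x8000000000000000L
long plusZeroBits = Double.doubleToLongBits(+0.0d);  // 0x0000000000000000L
assert (minusZeroBits >>> 63) == 1; // sign bit set
assert (plusZeroBits >>> 63) == 0;  // sign bit clear
assert Double.compare(-0.0d, +0.0d) < 0; // -0.0 sorts first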
Use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
The class TestFieldCacheSort, method testDoubleReverse.
/** Tests sorting on type double in reverse */
public void testDoubleReverse() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoublePoint("value", 30.1));
doc.add(new StoredField("value", 30.1));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", -1.3));
doc.add(new StoredField("value", -1.3));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", 4.2333333333333));
doc.add(new StoredField("value", 4.2333333333333));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", 4.2333333333332));
doc.add(new StoredField("value", 4.2333333333332));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(), Collections.singletonMap("value", Type.DOUBLE_POINT));
writer.close();
IndexSearcher searcher = newSearcher(ir, false);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// numeric order
assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
TestUtil.checkReader(ir);
ir.close();
dir.close();
}
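A side-by-side sketch of the reverse flag used above (the ascending variant is an illustration, not part of this test): the third SortField constructor argument flips the natural ascending order.
Sort ascending = new Sort(new SortField("value", SortField.Type.DOUBLE));        // would return -1.3 first
Sort descending = new Sort(new SortField("value", SortField.Type.DOUBLE, true)); // returns 30.1 first, as asserted above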
Use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
The class TestFieldCacheSort, method testDoubleMissing.
/** Tests sorting on type double with a missing value */
public void testDoubleMissing() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", -1.3));
doc.add(new StoredField("value", -1.3));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", 4.2333333333333));
doc.add(new StoredField("value", 4.2333333333333));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", 4.2333333333332));
doc.add(new StoredField("value", 4.2333333333332));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(), Collections.singletonMap("value", Type.DOUBLE_POINT));
writer.close();
IndexSearcher searcher = newSearcher(ir, false);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// null treated as a 0
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
TestUtil.checkReader(ir);
ir.close();
dir.close();
}
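As the assertions above indicate, a numeric SortField with no configured missing value treats documents without the field as 0.0, which is why the empty document lands between -1.3 and 4.2333333333332. Making that default explicit would look like this (a sketch, equivalent to the sort used above under that assumption):
SortField sortField = new SortField("value", SortField.Type.DOUBLE);
sortField.setMissingValue(0.0d); // same ordering as the default behaviour shown above
Sort sort = new Sort(sortField);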
Use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
The class TestFieldCacheSort, method testDoubleMissingLast.
/** Tests sorting on type double, specifying the missing value should be treated as Double.MAX_VALUE */
public void testDoubleMissingLast() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", -1.3));
doc.add(new StoredField("value", -1.3));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", 4.2333333333333));
doc.add(new StoredField("value", 4.2333333333333));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("value", 4.2333333333332));
doc.add(new StoredField("value", 4.2333333333332));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(), Collections.singletonMap("value", Type.DOUBLE_POINT));
writer.close();
IndexSearcher searcher = newSearcher(ir, false);
SortField sortField = new SortField("value", SortField.Type.DOUBLE);
sortField.setMissingValue(Double.MAX_VALUE);
Sort sort = new Sort(sortField);
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// null treated as Double.MAX_VALUE
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
TestUtil.checkReader(ir);
ir.close();
dir.close();
}
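By the same mechanism, missing documents can be pushed to the front of an ascending sort by choosing a sentinel at the low end of the range, e.g. Double.NEGATIVE_INFINITY (a sketch, not part of the test above):
SortField sortField = new SortField("value", SortField.Type.DOUBLE);
sortField.setMissingValue(Double.NEGATIVE_INFINITY); // documents without "value" sort first
Sort sort = new Sort(sortField);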
Use of org.apache.lucene.document.DoublePoint in project lucene-solr by apache.
The class JoinUtil, method createJoinQuery.
/**
* Method for query time joining for numeric fields. It supports multi- and single-valued longs, ints, floats and doubles.
* All considerations from {@link JoinUtil#createJoinQuery(String, boolean, String, Query, IndexSearcher, ScoreMode)} are applicable here too,
* though memory consumption might be higher.
* <p>
*
* @param fromField The from field to join from
* @param multipleValuesPerDocument Whether the from field has multiple terms per document
* when true fromField might be {@link DocValuesType#SORTED_NUMERIC},
* otherwise fromField should be {@link DocValuesType#NUMERIC}
* @param toField The to field to join to, should be {@link IntPoint}, {@link LongPoint}, {@link FloatPoint}
* or {@link DoublePoint}.
* @param numericType either {@link java.lang.Integer}, {@link java.lang.Long}, {@link java.lang.Float}
* or {@link java.lang.Double}; it should correspond to the type of the toField
* @param fromQuery The query to match documents on the from side
* @param fromSearcher The searcher that executed the specified fromQuery
* @param scoreMode Instructs how scores from the fromQuery are mapped to the returned query
* @return a {@link Query} instance that can be used to join documents based on the
* terms in the from and to field
* @throws IOException If I/O related errors occur
*/
public static Query createJoinQuery(String fromField, boolean multipleValuesPerDocument, String toField, Class<? extends Number> numericType, Query fromQuery, IndexSearcher fromSearcher, ScoreMode scoreMode) throws IOException {
TreeSet<Long> joinValues = new TreeSet<>();
Map<Long, Float> aggregatedScores = new HashMap<>();
Map<Long, Integer> occurrences = new HashMap<>();
boolean needsScore = scoreMode != ScoreMode.None;
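// Select how scores from the "from" side are folded together per join value: max, min, sum (Total), or sum plus an occurrence count for Avg. ScoreMode.None never invokes the aggregator.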
BiConsumer<Long, Float> scoreAggregator;
if (scoreMode == ScoreMode.Max) {
scoreAggregator = (key, score) -> {
Float currentValue = aggregatedScores.putIfAbsent(key, score);
if (currentValue != null) {
aggregatedScores.put(key, Math.max(currentValue, score));
}
};
} else if (scoreMode == ScoreMode.Min) {
scoreAggregator = (key, score) -> {
Float currentValue = aggregatedScores.putIfAbsent(key, score);
if (currentValue != null) {
aggregatedScores.put(key, Math.min(currentValue, score));
}
};
} else if (scoreMode == ScoreMode.Total) {
scoreAggregator = (key, score) -> {
Float currentValue = aggregatedScores.putIfAbsent(key, score);
if (currentValue != null) {
aggregatedScores.put(key, currentValue + score);
}
};
} else if (scoreMode == ScoreMode.Avg) {
scoreAggregator = (key, score) -> {
Float currentScore = aggregatedScores.putIfAbsent(key, score);
if (currentScore != null) {
aggregatedScores.put(key, currentScore + score);
}
Integer currentOccurrence = occurrences.putIfAbsent(key, 1);
if (currentOccurrence != null) {
occurrences.put(key, ++currentOccurrence);
}
};
} else {
scoreAggregator = (key, score) -> {
throw new UnsupportedOperationException();
};
}
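// Turn the aggregated data into a final join score per value: Avg divides the summed score by its occurrence count, the other modes use the aggregated score as-is.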
LongFunction<Float> joinScorer;
if (scoreMode == ScoreMode.Avg) {
joinScorer = (joinValue) -> {
Float aggregatedScore = aggregatedScores.get(joinValue);
Integer occurrence = occurrences.get(joinValue);
return aggregatedScore / occurrence;
};
} else {
joinScorer = aggregatedScores::get;
}
Collector collector;
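// Collect the join values (and scores, if needed) from the "from" side: multi-valued fields are read through SORTED_NUMERIC doc values, single-valued fields through NUMERIC doc values.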
if (multipleValuesPerDocument) {
collector = new SimpleCollector() {
SortedNumericDocValues sortedNumericDocValues;
Scorer scorer;
@Override
public void collect(int doc) throws IOException {
if (doc > sortedNumericDocValues.docID()) {
sortedNumericDocValues.advance(doc);
}
if (doc == sortedNumericDocValues.docID()) {
for (int i = 0; i < sortedNumericDocValues.docValueCount(); i++) {
long value = sortedNumericDocValues.nextValue();
joinValues.add(value);
if (needsScore) {
scoreAggregator.accept(value, scorer.score());
}
}
}
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
sortedNumericDocValues = DocValues.getSortedNumeric(context.reader(), fromField);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public boolean needsScores() {
return needsScore;
}
};
} else {
collector = new SimpleCollector() {
NumericDocValues numericDocValues;
Scorer scorer;
private int lastDocID = -1;
private boolean docsInOrder(int docID) {
if (docID < lastDocID) {
throw new AssertionError("docs out of order: lastDocID=" + lastDocID + " vs docID=" + docID);
}
lastDocID = docID;
return true;
}
@Override
public void collect(int doc) throws IOException {
assert docsInOrder(doc);
int dvDocID = numericDocValues.docID();
if (dvDocID < doc) {
dvDocID = numericDocValues.advance(doc);
}
long value;
if (dvDocID == doc) {
value = numericDocValues.longValue();
} else {
value = 0;
}
joinValues.add(value);
if (needsScore) {
scoreAggregator.accept(value, scorer.score());
}
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
numericDocValues = DocValues.getNumeric(context.reader(), fromField);
lastDocID = -1;
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public boolean needsScores() {
return needsScore;
}
};
}
fromSearcher.search(fromQuery, collector);
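// Stream the collected join values into a point query against the "to" field, encoding each value with the point encoding of the requested numeric type.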
Iterator<Long> iterator = joinValues.iterator();
final int bytesPerDim;
final BytesRef encoded = new BytesRef();
final PointInSetIncludingScoreQuery.Stream stream;
if (Integer.class.equals(numericType)) {
bytesPerDim = Integer.BYTES;
stream = new PointInSetIncludingScoreQuery.Stream() {
@Override
public BytesRef next() {
if (iterator.hasNext()) {
long value = iterator.next();
IntPoint.encodeDimension((int) value, encoded.bytes, 0);
if (needsScore) {
score = joinScorer.apply(value);
}
return encoded;
} else {
return null;
}
}
};
} else if (Long.class.equals(numericType)) {
bytesPerDim = Long.BYTES;
stream = new PointInSetIncludingScoreQuery.Stream() {
@Override
public BytesRef next() {
if (iterator.hasNext()) {
long value = iterator.next();
LongPoint.encodeDimension(value, encoded.bytes, 0);
if (needsScore) {
score = joinScorer.apply(value);
}
return encoded;
} else {
return null;
}
}
};
} else if (Float.class.equals(numericType)) {
bytesPerDim = Float.BYTES;
stream = new PointInSetIncludingScoreQuery.Stream() {
@Override
public BytesRef next() {
if (iterator.hasNext()) {
long value = iterator.next();
FloatPoint.encodeDimension(Float.intBitsToFloat((int) value), encoded.bytes, 0);
if (needsScore) {
score = joinScorer.apply(value);
}
return encoded;
} else {
return null;
}
}
};
} else if (Double.class.equals(numericType)) {
bytesPerDim = Double.BYTES;
stream = new PointInSetIncludingScoreQuery.Stream() {
@Override
public BytesRef next() {
if (iterator.hasNext()) {
long value = iterator.next();
DoublePoint.encodeDimension(Double.longBitsToDouble(value), encoded.bytes, 0);
if (needsScore) {
score = joinScorer.apply(value);
}
return encoded;
} else {
return null;
}
}
};
} else {
throw new IllegalArgumentException("unsupported numeric type, only Integer, Long, Float and Double are supported");
}
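// Every supported type encodes a single dimension, so one buffer of bytesPerDim bytes is reused for all streamed values.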
encoded.bytes = new byte[bytesPerDim];
encoded.length = bytesPerDim;
if (needsScore) {
return new PointInSetIncludingScoreQuery(scoreMode, fromQuery, multipleValuesPerDocument, toField, bytesPerDim, stream) {
@Override
protected String toString(byte[] value) {
return toString.apply(value, numericType);
}
};
} else {
return new PointInSetQuery(toField, 1, bytesPerDim, stream) {
@Override
protected String toString(byte[] value) {
return PointInSetIncludingScoreQuery.toString.apply(value, numericType);
}
};
}
}
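A hedged usage sketch of the numeric join documented above; the field names, queries, and searchers are hypothetical, and it assumes the "from" index stores productId as NUMERIC doc values while the "to" index indexes it as a LongPoint.
// Join "from" documents matching the category onto "to" documents sharing the same productId.
Query fromQuery = new TermQuery(new Term("category", "books"));
Query joinQuery = JoinUtil.createJoinQuery(
  "productId",      // fromField: NUMERIC doc values on the "from" side
  false,            // single value per "from" document
  "productId",      // toField: indexed as a LongPoint on the "to" side
  Long.class,       // numericType matching the LongPoint
  fromQuery,
  fromSearcher,     // IndexSearcher over the "from" index (hypothetical)
  ScoreMode.Max);
TopDocs hits = toSearcher.search(joinQuery, 10); // toSearcher over the "to" index (hypothetical)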