Use of org.apache.lucene.document.IntPoint in project elasticsearch by elastic.
From class ValueCountAggregatorTests, method testQueryFiltersAll:
public void testQueryFiltersAll() throws IOException {
    testCase(IntPoint.newRangeQuery("level", -1, 0), ValueType.STRING, iw -> {
        iw.addDocument(Arrays.asList(new IntPoint("level", 3), new SortedDocValuesField(FIELD_NAME, new BytesRef("foo"))));
        iw.addDocument(Arrays.asList(new IntPoint("level", 5), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz"))));
    }, count -> assertEquals(0L, count.getValue()));
}
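For context, IntPoint.newRangeQuery builds an inclusive range query over a single-dimension int point field, so the [-1, 0] query above cannot match the documents indexed with levels 3 and 5, and the aggregation counts zero values. A minimal, self-contained sketch of that behavior (the class name and the use of RAMDirectory are ours, not part of the test):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;

public class IntPointRangeSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig())) {
            for (int level : new int[] { 3, 5 }) {
                Document doc = new Document();
                doc.add(new IntPoint("level", level)); // indexed as a 1D point, no doc values
                w.addDocument(doc);
            }
        }
        try (DirectoryReader r = DirectoryReader.open(dir)) {
            IndexSearcher s = new IndexSearcher(r);
            // Both bounds are inclusive: [-1, 0] matches neither 3 nor 5.
            System.out.println(s.count(IntPoint.newRangeQuery("level", -1, 0))); // prints 0
        }
    }
}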
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
From class TestFieldCacheSortRandom, method testRandomStringSort:
private void testRandomStringSort(SortField.Type type) throws Exception {
    Random random = new Random(random().nextLong());
    final int NUM_DOCS = atLeast(100);
    final Directory dir = newDirectory();
    final RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    final boolean allowDups = random.nextBoolean();
    final Set<String> seen = new HashSet<>();
    final int maxLength = TestUtil.nextInt(random, 5, 100);
    if (VERBOSE) {
        System.out.println("TEST: NUM_DOCS=" + NUM_DOCS + " maxLength=" + maxLength + " allowDups=" + allowDups);
    }
    int numDocs = 0;
    final List<BytesRef> docValues = new ArrayList<>();
    // TODO: deletions
    while (numDocs < NUM_DOCS) {
        final Document doc = new Document();
        // 10% of the time, the document is missing the value:
        final BytesRef br;
        if (random().nextInt(10) != 7) {
            final String s;
            if (random.nextBoolean()) {
                s = TestUtil.randomSimpleString(random, maxLength);
            } else {
                s = TestUtil.randomUnicodeString(random, maxLength);
            }
            if (!allowDups) {
                if (seen.contains(s)) {
                    continue;
                }
                seen.add(s);
            }
            if (VERBOSE) {
                System.out.println(" " + numDocs + ": s=" + s);
            }
            doc.add(new StringField("stringdv", s, Field.Store.NO));
            docValues.add(new BytesRef(s));
        } else {
            br = null;
            if (VERBOSE) {
                System.out.println(" " + numDocs + ": <missing>");
            }
            docValues.add(null);
        }
        doc.add(new IntPoint("id", numDocs));
        doc.add(new StoredField("id", numDocs));
        writer.addDocument(doc);
        numDocs++;
        if (random.nextInt(40) == 17) {
            // force flush
            writer.getReader().close();
        }
    }
    Map<String, UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("stringdv", Type.SORTED);
    mapping.put("id", Type.INTEGER_POINT);
    final IndexReader r = UninvertingReader.wrap(writer.getReader(), mapping);
    writer.close();
    if (VERBOSE) {
        System.out.println(" reader=" + r);
    }
    final IndexSearcher s = newSearcher(r, false);
    final int ITERS = atLeast(100);
    for (int iter = 0; iter < ITERS; iter++) {
        final boolean reverse = random.nextBoolean();
        final TopFieldDocs hits;
        final SortField sf;
        final boolean sortMissingLast;
        final boolean missingIsNull;
        sf = new SortField("stringdv", type, reverse);
        sortMissingLast = random().nextBoolean();
        missingIsNull = true;
        if (sortMissingLast) {
            sf.setMissingValue(SortField.STRING_LAST);
        }
        final Sort sort;
        if (random.nextBoolean()) {
            sort = new Sort(sf);
        } else {
            sort = new Sort(sf, SortField.FIELD_DOC);
        }
        final int hitCount = TestUtil.nextInt(random, 1, r.maxDoc() + 20);
        final RandomQuery f = new RandomQuery(random.nextLong(), random.nextFloat(), docValues);
        int queryType = random.nextInt(2);
        if (queryType == 0) {
            hits = s.search(new ConstantScoreQuery(f), hitCount, sort, random.nextBoolean(), random.nextBoolean());
        } else {
            hits = s.search(f, hitCount, sort, random.nextBoolean(), random.nextBoolean());
        }
        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " " + hits.totalHits + " hits; topN=" + hitCount + "; reverse=" + reverse + "; sortMissingLast=" + sortMissingLast + " sort=" + sort);
        }
        // Compute expected results:
        Collections.sort(f.matchValues, new Comparator<BytesRef>() {
            @Override
            public int compare(BytesRef a, BytesRef b) {
                if (a == null) {
                    if (b == null) {
                        return 0;
                    }
                    if (sortMissingLast) {
                        return 1;
                    } else {
                        return -1;
                    }
                } else if (b == null) {
                    if (sortMissingLast) {
                        return -1;
                    } else {
                        return 1;
                    }
                } else {
                    return a.compareTo(b);
                }
            }
        });
        if (reverse) {
            Collections.reverse(f.matchValues);
        }
        final List<BytesRef> expected = f.matchValues;
        if (VERBOSE) {
            System.out.println(" expected:");
            for (int idx = 0; idx < expected.size(); idx++) {
                BytesRef br = expected.get(idx);
                if (br == null && missingIsNull == false) {
                    br = new BytesRef();
                }
                System.out.println(" " + idx + ": " + (br == null ? "<missing>" : br.utf8ToString()));
                if (idx == hitCount - 1) {
                    break;
                }
            }
        }
        if (VERBOSE) {
            System.out.println(" actual:");
            for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) {
                final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
                BytesRef br = (BytesRef) fd.fields[0];
                System.out.println(" " + hitIDX + ": " + (br == null ? "<missing>" : br.utf8ToString()) + " id=" + s.doc(fd.doc).get("id"));
            }
        }
        for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) {
            final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
            BytesRef br = expected.get(hitIDX);
            if (br == null && missingIsNull == false) {
                br = new BytesRef();
            }
            // Normally, the old codecs (that don't support docsWithField via
            // doc values) will always return an empty BytesRef for the missing
            // case; however, if all docs in a given segment were missing, in
            // that case it will return null! So we must map null here, too:
            BytesRef br2 = (BytesRef) fd.fields[0];
            if (br2 == null && missingIsNull == false) {
                br2 = new BytesRef();
            }
            assertEquals(br, br2);
        }
    }
    r.close();
    dir.close();
}
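The detail most worth lifting out of this test is the missing-value handling: setMissingValue(SortField.STRING_LAST) makes documents with no value for the sort field compare after every present value (by default they sort first in ascending order), and the expected-results comparator above mirrors exactly that rule. A minimal sketch, assuming an IndexSearcher over an index like the one built above (the helper name is ours):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;

static TopFieldDocs searchMissingLast(IndexSearcher searcher) throws IOException {
    SortField sf = new SortField("stringdv", SortField.Type.STRING);
    sf.setMissingValue(SortField.STRING_LAST); // docs without "stringdv" land at the end
    return searcher.search(new MatchAllDocsQuery(), 10, new Sort(sf));
}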
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
From class TestUninvertingReader, method testFieldInfos:
public void testFieldInfos() throws IOException {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
    Document doc = new Document();
    BytesRef idBytes = new BytesRef("id");
    doc.add(new StringField("id", idBytes, Store.YES));
    doc.add(new LegacyIntField("int", 5, Store.YES));
    doc.add(new NumericDocValuesField("dv", 5));
    doc.add(new IntPoint("dint", 5));
    // not indexed
    doc.add(new StoredField("stored", 5));
    iw.addDocument(doc);
    iw.forceMerge(1);
    iw.close();
    Map<String, Type> uninvertingMap = new HashMap<>();
    uninvertingMap.put("int", Type.LEGACY_INTEGER);
    uninvertingMap.put("dv", Type.LEGACY_INTEGER);
    uninvertingMap.put("dint", Type.INTEGER_POINT);
    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), uninvertingMap);
    LeafReader leafReader = ir.leaves().get(0).reader();
    FieldInfo intFInfo = leafReader.getFieldInfos().fieldInfo("int");
    assertEquals(DocValuesType.NUMERIC, intFInfo.getDocValuesType());
    assertEquals(0, intFInfo.getPointDimensionCount());
    assertEquals(0, intFInfo.getPointNumBytes());
    FieldInfo dintFInfo = leafReader.getFieldInfos().fieldInfo("dint");
    assertEquals(DocValuesType.NUMERIC, dintFInfo.getDocValuesType());
    assertEquals(1, dintFInfo.getPointDimensionCount());
    assertEquals(4, dintFInfo.getPointNumBytes());
    FieldInfo dvFInfo = leafReader.getFieldInfos().fieldInfo("dv");
    assertEquals(DocValuesType.NUMERIC, dvFInfo.getDocValuesType());
    FieldInfo storedFInfo = leafReader.getFieldInfos().fieldInfo("stored");
    assertEquals(DocValuesType.NONE, storedFInfo.getDocValuesType());
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}
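The "dint" assertions encode two facts about IntPoint: it indexes a single dimension of Integer.BYTES (4) bytes per value, and UninvertingReader leaves the point data intact while synthesizing a NUMERIC doc-values view over it. The same numbers can be read straight off the field itself; a small sketch:

IntPoint dint = new IntPoint("dint", 5);
assertEquals(1, dint.fieldType().pointDimensionCount());       // single-dimension point
assertEquals(Integer.BYTES, dint.fieldType().pointNumBytes()); // 4 bytes per dimension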
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
From class TestFieldCacheReopen, method testFieldCacheReuseAfterReopen:
// TODO: make a version of this that tests the same thing with UninvertingReader.wrap()
// LUCENE-1579: Ensure that on a reopened reader, any shared
// segments reuse the doc values arrays in FieldCache
public void testFieldCacheReuseAfterReopen() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(10)));
    Document doc = new Document();
    doc.add(new IntPoint("number", 17));
    writer.addDocument(doc);
    writer.commit();
    // Open reader1
    DirectoryReader r = DirectoryReader.open(dir);
    LeafReader r1 = getOnlyLeafReader(r);
    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.INT_POINT_PARSER);
    assertEquals(0, ints.nextDoc());
    assertEquals(17, ints.longValue());
    // Add new segment
    writer.addDocument(doc);
    writer.commit();
    // Reopen reader1 --> reader2
    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
    assertNotNull(r2);
    r.close();
    LeafReader sub0 = r2.leaves().get(0).reader();
    final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.INT_POINT_PARSER);
    r2.close();
    assertEquals(0, ints2.nextDoc());
    assertEquals(17, ints2.longValue());
    writer.close();
    dir.close();
}
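The reuse being exercised here hinges on the FieldCache keying its entries on the segment core, which an openIfChanged reopen shares for unchanged segments. A sketch of how that sharing could be made explicit before r is closed, using Lucene 6's LeafReader.getCoreCacheKey() (this check is our assumption; the test itself never asserts it):

// Captured before r.close(): the first leaf of r and of the reopened r2
// wrap the same unchanged segment, so their core cache keys are identical
// and the cached "number" array is not rebuilt.
Object keyBefore = getOnlyLeafReader(r).getCoreCacheKey();
Object keyAfter = r2.leaves().get(0).reader().getCoreCacheKey();
assertSame(keyBefore, keyAfter);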
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
From class TestFieldCacheSort, method testIntReverse:
/** Tests sorting on type int in reverse */
public void testIntReverse() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new IntPoint("value", 300000));
    doc.add(new StoredField("value", 300000));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new IntPoint("value", -1));
    doc.add(new StoredField("value", -1));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new IntPoint("value", 4));
    doc.add(new StoredField("value", 4));
    writer.addDocument(doc);
    IndexReader ir = UninvertingReader.wrap(writer.getReader(), Collections.singletonMap("value", Type.INTEGER_POINT));
    writer.close();
    IndexSearcher searcher = newSearcher(ir, false);
    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // reverse numeric order
    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}
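UninvertingReader is only needed here because the documents carry no doc values. When the schema is under your control, the more direct pattern is to index a NumericDocValuesField alongside the IntPoint and sort without any wrapping; a minimal sketch (the use of RAMDirectory and the field-role comments are ours):

RAMDirectory dir = new RAMDirectory();
try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig())) {
    for (int v : new int[] { 300000, -1, 4 }) {
        Document doc = new Document();
        doc.add(new IntPoint("value", v));              // points: range/exact queries
        doc.add(new NumericDocValuesField("value", v)); // doc values: sorting
        doc.add(new StoredField("value", v));           // stored: retrieval
        w.addDocument(doc);
    }
}
try (DirectoryReader r = DirectoryReader.open(dir)) {
    IndexSearcher searcher = new IndexSearcher(r);
    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort); // 300000, 4, -1
}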