Use of org.apache.lucene.document.StoredField in project lucene-solr by apache.
From the class TestFieldCacheSort, method testDoubleMissingLast.
/** Tests sorting on type double, specifying that the missing value should be treated as Double.MAX_VALUE */
public void testDoubleMissingLast() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  // first document has no "value" field at all: this is the missing case
  Document doc = new Document();
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new DoublePoint("value", -1.3));
  doc.add(new StoredField("value", -1.3));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new DoublePoint("value", 4.2333333333333));
  doc.add(new StoredField("value", 4.2333333333333));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new DoublePoint("value", 4.2333333333332));
  doc.add(new StoredField("value", 4.2333333333332));
  writer.addDocument(doc);
  IndexReader ir = UninvertingReader.wrap(writer.getReader(),
      Collections.singletonMap("value", Type.DOUBLE_POINT));
  writer.close();
  IndexSearcher searcher = newSearcher(ir, false);
  SortField sortField = new SortField("value", SortField.Type.DOUBLE);
  sortField.setMissingValue(Double.MAX_VALUE);
  Sort sort = new Sort(sortField);
  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(4, td.totalHits);
  // null is treated as Double.MAX_VALUE, so the document without a value sorts last
  assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
  assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
  assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
  assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
  TestUtil.checkReader(ir);
  ir.close();
  dir.close();
}
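For contrast, the same index can be made to sort missing values first; a minimal sketch reusing the searcher built above (the -Double.MAX_VALUE sentinel is our choice here, not something this test asserts):

  SortField missingFirst = new SortField("value", SortField.Type.DOUBLE);
  missingFirst.setMissingValue(-Double.MAX_VALUE); // missing now sorts before all real values
  TopDocs first = searcher.search(new MatchAllDocsQuery(), 10, new Sort(missingFirst));
  assertNull(searcher.doc(first.scoreDocs[0].doc).get("value")); // the document without "value" comes first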
Use of org.apache.lucene.document.StoredField in project lucene-solr by apache.
From the class TestFieldCacheSort, method testIntReverse.
/** Tests sorting on type int in reverse */
public void testIntReverse() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new IntPoint("value", 300000));
  doc.add(new StoredField("value", 300000));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint("value", -1));
  doc.add(new StoredField("value", -1));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint("value", 4));
  doc.add(new StoredField("value", 4));
  writer.addDocument(doc);
  IndexReader ir = UninvertingReader.wrap(writer.getReader(),
      Collections.singletonMap("value", Type.INTEGER_POINT));
  writer.close();
  IndexSearcher searcher = newSearcher(ir, false);
  Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(3, td.totalHits);
  // reverse numeric order
  assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
  assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
  assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
  TestUtil.checkReader(ir);
  ir.close();
  dir.close();
}
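UninvertingReader is needed here only because the index carries no doc values for "value". A minimal sketch of the more direct alternative, assuming the indexing side can be changed: add a NumericDocValuesField next to the point and stored fields, and a plain reader sorts without any wrapping.

  Document doc = new Document();
  doc.add(new IntPoint("value", 300000));              // indexed for point/range queries
  doc.add(new StoredField("value", 300000));           // stored for retrieval
  doc.add(new NumericDocValuesField("value", 300000)); // per-document value used for sorting
  writer.addDocument(doc);
  IndexReader ir = writer.getReader();                 // no UninvertingReader.wrap needed
  IndexSearcher searcher = newSearcher(ir, false);
  Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));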
Use of org.apache.lucene.document.StoredField in project lucene-solr by apache.
From the class TestCompressingStoredFieldsFormat, method testChunkCleanup.
/**
 * Writes some tiny segments with incomplete compressed blocks,
 * and ensures merge recompresses them.
 */
public void testChunkCleanup() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
  iwConf.setMergePolicy(NoMergePolicy.INSTANCE);
  // we have to enforce certain things like maxDocsPerChunk to cause dirty chunks
  // to be created by this test
  iwConf.setCodec(CompressingCodec.randomInstance(random(), 4 * 1024, 100, false, 8));
  IndexWriter iw = new IndexWriter(dir, iwConf);
  DirectoryReader ir = DirectoryReader.open(iw);
  for (int i = 0; i < 5; i++) {
    Document doc = new Document();
    doc.add(new StoredField("text", "not very long at all"));
    iw.addDocument(doc);
    // force flush
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);
    assertNotNull(ir2);
    ir.close();
    ir = ir2;
    // examine dirty counts:
    for (LeafReaderContext leaf : ir2.leaves()) {
      CodecReader sr = (CodecReader) leaf.reader();
      CompressingStoredFieldsReader reader = (CompressingStoredFieldsReader) sr.getFieldsReader();
      assertEquals(1, reader.getNumChunks());
      assertEquals(1, reader.getNumDirtyChunks());
    }
  }
  iw.getConfig().setMergePolicy(newLogMergePolicy());
  iw.forceMerge(1);
  DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);
  assertNotNull(ir2);
  ir.close();
  ir = ir2;
  CodecReader sr = (CodecReader) getOnlyLeafReader(ir);
  CompressingStoredFieldsReader reader = (CompressingStoredFieldsReader) sr.getFieldsReader();
  // we could get lucky and have zero, but typically one dirty chunk remains
  assertTrue(reader.getNumDirtyChunks() <= 1);
  ir.close();
  iw.close();
  dir.close();
}
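The loop above relies on the near-real-time reader pattern to force a flush after every document: a reader opened from the writer is reopened with openIfChanged, which makes the writer flush its pending documents into a new tiny segment. A minimal sketch of that pattern in isolation (variable names are ours):

  DirectoryReader reader = DirectoryReader.open(iw);             // NRT reader tied to the writer
  iw.addDocument(doc);
  DirectoryReader newer = DirectoryReader.openIfChanged(reader); // flushes, returns a fresh reader or null
  if (newer != null) {
    reader.close();
    reader = newer;
  }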
Use of org.apache.lucene.document.StoredField in project lucene-solr by apache.
From the class TestBlockJoin, method makeJob.
// ... has multiple jobs
private Document makeJob(String skill, int year) {
  Document job = new Document();
  job.add(newStringField("skill", skill, Field.Store.YES));
  job.add(new IntPoint("year", year));
  job.add(new StoredField("year", year));
  return job;
}
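For context, the parent documents these jobs join to are built by a companion helper in the same test class, roughly along these lines (a sketch from memory; the exact field names may differ from the original):

  private Document makeResume(String name, String country) {
    Document resume = new Document();
    resume.add(newStringField("docType", "resume", Field.Store.NO));
    resume.add(newStringField("name", name, Field.Store.YES));
    resume.add(newStringField("country", country, Field.Store.NO));
    return resume;
  }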
Use of org.apache.lucene.document.StoredField in project lucene-solr by apache.
From the class TestBlockJoin, method testChildQueryNeverMatches.
// LUCENE-4968
public void testChildQueryNeverMatches() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document parent = new Document();
  parent.add(new StoredField("parentID", "0"));
  parent.add(new SortedDocValuesField("parentID", new BytesRef("0")));
  parent.add(newTextField("parentText", "text", Field.Store.NO));
  parent.add(newStringField("isParent", "yes", Field.Store.NO));
  List<Document> docs = new ArrayList<>();
  Document child = new Document();
  docs.add(child);
  child.add(new StoredField("childID", "0"));
  child.add(newTextField("childText", "text", Field.Store.NO));
  // parent last:
  docs.add(parent);
  w.addDocuments(docs);
  docs.clear();
  parent = new Document();
  parent.add(newTextField("parentText", "text", Field.Store.NO));
  parent.add(newStringField("isParent", "yes", Field.Store.NO));
  parent.add(new StoredField("parentID", "1"));
  parent.add(new SortedDocValuesField("parentID", new BytesRef("1")));
  // parent last:
  docs.add(parent);
  w.addDocuments(docs);
  IndexReader r = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(r);
  // never matches:
  Query childQuery = new TermQuery(new Term("childText", "bogus"));
  BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("isParent", "yes")));
  CheckJoinIndex.check(r, parentsFilter);
  ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
  Weight weight = searcher.createNormalizedWeight(childJoinQuery, random().nextBoolean());
  Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  assertNull(scorer);
  // never matches and produces a null scorer
  childQuery = new TermQuery(new Term("bogus", "bogus"));
  childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
  weight = searcher.createNormalizedWeight(childJoinQuery, random().nextBoolean());
  scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  assertNull(scorer);
  r.close();
  d.close();
}
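The Weight/Scorer plumbing is the lowest-level way to verify that nothing matches; a sketch of the same check expressed at the IndexSearcher level (our addition, not part of the original test):

  TopDocs td = searcher.search(childJoinQuery, 10);
  assertEquals(0, td.totalHits); // a join over a never-matching child query finds no parents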