Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
Class TestCodecHoldsOpenFiles, method test().
public void test() throws Exception {
  BaseDirectoryWrapper d = newDirectory();
  d.setCheckIndexOnClose(false);
  // we nuke files, but verify the reader still works
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  int numDocs = atLeast(100);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(newField("foo", "bar", TextField.TYPE_NOT_STORED));
    doc.add(new IntPoint("doc", i));
    doc.add(new IntPoint("doc2d", i, i));
    doc.add(new NumericDocValuesField("dv", i));
    w.addDocument(doc);
  }
  IndexReader r = w.getReader();
  w.commit();
  w.close();
  for (String name : d.listAll()) {
    d.deleteFile(name);
  }
  for (LeafReaderContext cxt : r.leaves()) {
    TestUtil.checkReader(cxt.reader());
  }
  r.close();
  d.close();
}
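The test above only writes one- and two-dimensional point fields and never queries them. For context, a minimal sketch (not from the test) of how fields indexed this way are typically queried; the searcher variable and the concrete values are assumptions:

Query exact = IntPoint.newExactQuery("doc", 42);             // 1-D exact match
Query range = IntPoint.newRangeQuery("doc", 10, 50);         // 1-D inclusive range
Query box2d = IntPoint.newRangeQuery("doc2d",
    new int[] { 10, 10 }, new int[] { 50, 50 });             // 2-D box, lower/upper bound per dimension
int hits = searcher.count(range);                            // assumes an IndexSearcher "searcher" over this index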
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
Class TestSuggestField, method testSuggestOnMostlyDeletedDocuments().
@Test
public void testSuggestOnMostlyDeletedDocuments() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  // using IndexWriter instead of RandomIndexWriter
  IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));
  int num = Math.min(1000, atLeast(10));
  for (int i = 1; i <= num; i++) {
    Document document = new Document();
    document.add(new SuggestField("suggest_field", "abc_" + i, i));
    document.add(new StoredField("weight_fld", i));
    document.add(new IntPoint("weight_fld", i));
    iw.addDocument(document);
    if (usually()) {
      iw.commit();
    }
  }
  iw.deleteDocuments(IntPoint.newRangeQuery("weight_fld", 2, Integer.MAX_VALUE));
  DirectoryReader reader = DirectoryReader.open(iw);
  SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
  TopSuggestDocs suggest = indexSearcher.suggest(query, 1, false);
  assertSuggestions(suggest, new Entry("abc_1", 1));
  reader.close();
  iw.close();
}
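An IntPoint only contributes to the points index; its value is neither stored nor searchable by terms, which is why the test adds a StoredField under the same name alongside it. A minimal sketch of that pairing, assuming an open IndexSearcher named searcher; the field name mirrors the test but the retrieval code is illustrative:

Document d = new Document();
d.add(new IntPoint("weight_fld", 7));      // indexed for point queries only
d.add(new StoredField("weight_fld", 7));   // stored copy so the value can be read back
// ... after indexing and opening a searcher:
TopDocs top = searcher.search(IntPoint.newExactQuery("weight_fld", 7), 10);
for (ScoreDoc sd : top.scoreDocs) {
  int weight = searcher.doc(sd.doc).getField("weight_fld").numericValue().intValue();
}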
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
Class BasePointsFormatTestCase, method testMixedSchema().
// LUCENE-7491
public void testMixedSchema() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  iwc.setMaxBufferedDocs(2);
  for (int i = 0; i < 2; i++) {
    Document doc = new Document();
    doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
    doc.add(new IntPoint("int", i));
    w.addDocument(doc);
  }
  // index has 1 segment now (with 2 docs) and that segment does have points, but the "id" field in particular does NOT
  Document doc = new Document();
  doc.add(new IntPoint("id", 0));
  w.addDocument(doc);
  // now we write another segment where the id field does have points:
  w.forceMerge(1);
  IOUtils.close(w, dir);
}
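LUCENE-7491 is about segments disagreeing on whether a given field has points: before the merge, the first segment has points for "int" but not "id", while the second has points for "id". A hedged sketch of how this could be inspected per segment, assuming the Lucene 7+ LeafReader.getPointValues(String) signature and that the directory is still open; this is not part of the test:

try (DirectoryReader reader = DirectoryReader.open(dir)) {
  for (LeafReaderContext ctx : reader.leaves()) {
    // null means this particular segment has no point values for the field
    PointValues idPoints = ctx.reader().getPointValues("id");
    System.out.println(ctx.reader() + " has id points: " + (idPoints != null));
  }
}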
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
Class BasePointsFormatTestCase, method testAddIndexes().
public void testAddIndexes() throws IOException {
  Directory dir1 = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir1);
  Document doc = new Document();
  doc.add(new IntPoint("int1", 17));
  w.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint("int2", 42));
  w.addDocument(doc);
  w.close();
  // Different field number assignments:
  Directory dir2 = newDirectory();
  w = new RandomIndexWriter(random(), dir2);
  doc = new Document();
  doc.add(new IntPoint("int2", 42));
  w.addDocument(doc);
  doc = new Document();
  doc.add(new IntPoint("int1", 17));
  w.addDocument(doc);
  w.close();
  Directory dir = newDirectory();
  w = new RandomIndexWriter(random(), dir);
  w.addIndexes(new Directory[] { dir1, dir2 });
  w.forceMerge(1);
  DirectoryReader r = w.getReader();
  IndexSearcher s = newSearcher(r, false);
  assertEquals(2, s.count(IntPoint.newExactQuery("int1", 17)));
  assertEquals(2, s.count(IntPoint.newExactQuery("int2", 42)));
  r.close();
  w.close();
  dir.close();
  dir1.close();
  dir2.close();
}
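The exact-match counts above are just one way to hit the merged points. A small illustrative sketch of equivalent set and range queries against the same searcher, used before it is closed; the extra value 99 and the range bounds are assumptions:

assertEquals(2, s.count(IntPoint.newSetQuery("int1", 17, 99)));    // matches docs whose value equals any listed value
assertEquals(2, s.count(IntPoint.newRangeQuery("int2", 40, 45)));  // inclusive range containing 42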
Use of org.apache.lucene.document.IntPoint in project lucene-solr by apache.
Class TestPerFieldPostingsFormat2, method testMergeCalledOnTwoFormats().
@SuppressWarnings("deprecation")
public void testMergeCalledOnTwoFormats() throws IOException {
MergeRecordingPostingsFormatWrapper pf1 = new MergeRecordingPostingsFormatWrapper(TestUtil.getDefaultPostingsFormat());
MergeRecordingPostingsFormatWrapper pf2 = new MergeRecordingPostingsFormatWrapper(TestUtil.getDefaultPostingsFormat());
IndexWriterConfig iwc = new IndexWriterConfig();
iwc.setCodec(new AssertingCodec() {
@Override
public PostingsFormat getPostingsFormatForField(String field) {
switch(field) {
case "f1":
case "f2":
return pf1;
case "f3":
case "f4":
return pf2;
default:
return super.getPostingsFormatForField(field);
}
}
});
Directory directory = newDirectory();
IndexWriter iwriter = new IndexWriter(directory, iwc);
Document doc = new Document();
doc.add(new StringField("f1", "val1", Field.Store.NO));
doc.add(new StringField("f2", "val2", Field.Store.YES));
// Points are not indexed as postings and should not appear in the merge fields
doc.add(new IntPoint("f3", 3));
doc.add(new StringField("f4", "val4", Field.Store.NO));
iwriter.addDocument(doc);
iwriter.commit();
doc = new Document();
doc.add(new StringField("f1", "val5", Field.Store.NO));
doc.add(new StringField("f2", "val6", Field.Store.YES));
doc.add(new IntPoint("f3", 7));
doc.add(new StringField("f4", "val8", Field.Store.NO));
iwriter.addDocument(doc);
iwriter.commit();
iwriter.forceMerge(1, true);
iwriter.close();
assertEquals(1, pf1.nbMergeCalls);
assertEquals(new HashSet<>(Arrays.asList("f1", "f2")), new HashSet<>(pf1.fieldNames));
assertEquals(1, pf2.nbMergeCalls);
assertEquals(Collections.singletonList("f4"), pf2.fieldNames);
directory.close();
}