Use of org.apache.lucene.index.IndexWriterConfig in project lucene-solr by apache.
From the class TestNRTCachingDirectory, method testNRTAndCommit:
public void testNRTAndCommit() throws Exception {
  Directory dir = newDirectory();
  NRTCachingDirectory cachedDir = new NRTCachingDirectory(dir, 2.0, 25.0);
  MockAnalyzer analyzer = new MockAnalyzer(random());
  analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  RandomIndexWriter w = new RandomIndexWriter(random(), cachedDir, conf);
  final LineFileDocs docs = new LineFileDocs(random());
  final int numDocs = TestUtil.nextInt(random(), 100, 400);
  if (VERBOSE) {
    System.out.println("TEST: numDocs=" + numDocs);
  }
  final List<BytesRef> ids = new ArrayList<>();
  DirectoryReader r = null;
  for (int docCount = 0; docCount < numDocs; docCount++) {
    final Document doc = docs.nextDoc();
    ids.add(new BytesRef(doc.get("docid")));
    w.addDocument(doc);
    if (random().nextInt(20) == 17) {
      if (r == null) {
        r = DirectoryReader.open(w.w);
      } else {
        final DirectoryReader r2 = DirectoryReader.openIfChanged(r);
        if (r2 != null) {
          r.close();
          r = r2;
        }
      }
      assertEquals(1 + docCount, r.numDocs());
      final IndexSearcher s = newSearcher(r);
      // Just make sure search can run; we can't assert
      // totHits since it could be 0
      TopDocs hits = s.search(new TermQuery(new Term("body", "the")), 10);
      // System.out.println("tot hits " + hits.totalHits);
    }
  }
  if (r != null) {
    r.close();
  }
  // Close should force cache to clear since all files are sync'd
  w.close();
  final String[] cachedFiles = cachedDir.listCachedFiles();
  for (String file : cachedFiles) {
    System.out.println("FAIL: cached file " + file + " remains after sync");
  }
  assertEquals(0, cachedFiles.length);
  r = DirectoryReader.open(dir);
  for (BytesRef id : ids) {
    assertEquals(1, r.docFreq(new Term("docid", id)));
  }
  r.close();
  cachedDir.close();
  docs.close();
}
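For reference, a minimal self-contained sketch of the same near-real-time pattern outside the test framework: wrap a base Directory in NRTCachingDirectory, build the writer from an IndexWriterConfig, and open the reader straight from the IndexWriter. The index path, cache sizes, field name, and class name below are illustrative assumptions, not taken from the test above.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class NrtCachingSketch {
  public static void main(String[] args) throws Exception {
    Directory base = FSDirectory.open(Paths.get("/tmp/nrt-index")); // illustrative path
    // cache newly written segments in RAM: merged segments up to 5 MB, 60 MB total
    NRTCachingDirectory cachedDir = new NRTCachingDirectory(base, 5.0, 60.0);
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    try (IndexWriter writer = new IndexWriter(cachedDir, conf)) {
      Document doc = new Document();
      doc.add(new TextField("body", "the quick brown fox", Field.Store.NO));
      writer.addDocument(doc);
      // near-real-time reader: sees the document before any commit
      try (DirectoryReader reader = DirectoryReader.open(writer)) {
        System.out.println("numDocs=" + reader.numDocs());
      }
    }
    cachedDir.close();
  }
}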
Use of org.apache.lucene.index.IndexWriterConfig in project lucene-solr by apache.
From the class TestSimilarity2, method testNoFieldSkew:
/** make sure scores are not skewed by docs not containing the field */
public void testNoFieldSkew() throws Exception {
  Directory dir = newDirectory();
  // an evil merge policy could reorder our docs for no reason
  IndexWriterConfig iwConfig = newIndexWriterConfig().setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConfig);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar baz somethingelse", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  IndexSearcher is = newSearcher(ir);
  BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
  queryBuilder.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
  queryBuilder.add(new TermQuery(new Term("foo", "baz")), BooleanClause.Occur.SHOULD);
  Query query = queryBuilder.build();
  // collect scores
  List<Explanation> scores = new ArrayList<>();
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    scores.add(is.explain(query, 0));
  }
  ir.close();
  // add some additional docs without the field
  int numExtraDocs = TestUtil.nextInt(random(), 1, 1000);
  for (int i = 0; i < numExtraDocs; i++) {
    iw.addDocument(new Document());
  }
  // check scores are the same
  ir = iw.getReader();
  is = newSearcher(ir);
  for (int i = 0; i < sims.size(); i++) {
    is.setSimilarity(sims.get(i));
    Explanation expected = scores.get(i);
    Explanation actual = is.explain(query, 0);
    assertEquals(sims.get(i).toString() + ": actual=" + actual + ",expected=" + expected, expected.getValue(), actual.getValue(), 0F);
  }
  iw.close();
  ir.close();
  dir.close();
}
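For reference, a hedged sketch of the IndexWriterConfig detail this test depends on: pinning the merge policy so document order stays predictable across merges. Outside the test framework the same idea is a plain setMergePolicy call on the config; the analyzer, temporary directory, LogDocMergePolicy choice, and class name below are assumptions for illustration.

import java.nio.file.Files;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MergePolicyConfigSketch {
  public static void main(String[] args) throws Exception {
    // a log merge policy only merges adjacent segments, so doc order is not shuffled
    IndexWriterConfig iwConfig = new IndexWriterConfig(new StandardAnalyzer())
        .setMergePolicy(new LogDocMergePolicy());
    try (Directory dir = FSDirectory.open(Files.createTempDirectory("merge-demo"));
         IndexWriter iw = new IndexWriter(dir, iwConfig)) {
      Document doc = new Document();
      doc.add(new TextField("foo", "bar baz somethingelse", Field.Store.NO));
      iw.addDocument(doc);
      iw.commit();
    }
  }
}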
Use of org.apache.lucene.index.IndexWriterConfig in project lucene-solr by apache.
From the class TestLockFactory, method testCustomLockFactory:
// Verify: we can provide our own LockFactory implementation, the right
// methods are called at the right time, locks are created, etc.
public void testCustomLockFactory() throws IOException {
  MockLockFactory lf = new MockLockFactory();
  Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory(lf));
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
  // add 100 documents (so that commit lock is used)
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }
  // Both write lock and commit lock should have been created:
  assertEquals("# of unique locks created (after instantiating IndexWriter)", 1, lf.locksCreated.size());
  writer.close();
}
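For reference, a hedged sketch of choosing a lock factory outside the test framework: rather than the test's MockLockFactory, the LockFactory is handed to the Directory when it is opened, and the IndexWriterConfig is otherwise unchanged. The path, the SimpleFSLockFactory choice, and the class name are illustrative assumptions.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;

public class LockFactorySketch {
  public static void main(String[] args) throws Exception {
    // the lock factory is chosen when opening the directory; IndexWriter acquires
    // the write lock through it when the writer is constructed
    Directory dir = FSDirectory.open(Paths.get("/tmp/lock-demo"), SimpleFSLockFactory.INSTANCE); // illustrative path
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      writer.commit();
    }
    dir.close();
  }
}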
Use of org.apache.lucene.index.IndexWriterConfig in project lucene-solr by apache.
From the class TestLockFactory, method testRAMDirectoryNoLocking:
// Verify: we can use the NoLockFactory with RAMDirectory w/ no
// exceptions raised:
// Verify: NoLockFactory allows two IndexWriters
public void testRAMDirectoryNoLocking() throws IOException {
  MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory(NoLockFactory.INSTANCE));
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
  // required so the second open succeeds
  writer.commit();
  // Create a 2nd IndexWriter. This is normally not allowed but it should run through since we're not
  // using any locks:
  IndexWriter writer2 = null;
  try {
    writer2 = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
  } catch (Exception e) {
    e.printStackTrace(System.out);
    fail("Should not have hit an IOException with no locking");
  }
  writer.close();
  if (writer2 != null) {
    writer2.close();
  }
}
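For reference, a hedged sketch of the OpenMode setting the second writer relies on: APPEND requires an index to already exist in the directory, while the default CREATE_OR_APPEND creates one on demand. The path, analyzer, and class name are illustrative assumptions.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class OpenModeSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(Paths.get("/tmp/existing-index")); // illustrative; must already hold an index
    IndexWriterConfig appendConf = new IndexWriterConfig(new StandardAnalyzer())
        .setOpenMode(OpenMode.APPEND); // throws IndexNotFoundException if nothing is there yet
    try (IndexWriter writer = new IndexWriter(dir, appendConf)) {
      writer.commit();
    }
    dir.close();
  }
}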
Use of org.apache.lucene.index.IndexWriterConfig in project lucene-solr by apache.
From the class SimplePrimaryNode, method initWriter:
private static IndexWriter initWriter(int id, Random random, Path indexPath, boolean doCheckIndexOnClose) throws IOException {
  Directory dir = SimpleReplicaNode.getDirectory(random, id, indexPath, doCheckIndexOnClose);
  MockAnalyzer analyzer = new MockAnalyzer(random);
  analyzer.setMaxTokenLength(TestUtil.nextInt(random, 1, IndexWriter.MAX_TERM_LENGTH));
  IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(random, analyzer);
  MergePolicy mp = iwc.getMergePolicy();
  // Force more frequent merging so we stress merge warming:
  if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tmp = (TieredMergePolicy) mp;
    tmp.setSegmentsPerTier(3);
    tmp.setMaxMergeAtOnce(3);
  } else if (mp instanceof LogMergePolicy) {
    LogMergePolicy lmp = (LogMergePolicy) mp;
    lmp.setMergeFactor(3);
  }
  IndexWriter writer = new IndexWriter(dir, iwc);
  TestUtil.reduceOpenFiles(writer);
  return writer;
}
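For reference, a hedged sketch of the same tuning pattern outside the test framework: read the default merge policy back from the IndexWriterConfig, adjust it if it is a TieredMergePolicy, then construct the writer. The analyzer, path, the value 3, and the class name are illustrative assumptions.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MergeTuningSketch {
  public static void main(String[] args) throws Exception {
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    MergePolicy mp = iwc.getMergePolicy(); // TieredMergePolicy by default
    if (mp instanceof TieredMergePolicy) {
      // fewer segments per tier means merges kick in sooner
      ((TieredMergePolicy) mp).setSegmentsPerTier(3);
    }
    Directory dir = FSDirectory.open(Paths.get("/tmp/merge-tuning")); // illustrative path
    try (IndexWriter writer = new IndexWriter(dir, iwc)) {
      writer.commit();
    }
    dir.close();
  }
}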