Usage example of org.apache.lucene.store.MockDirectoryWrapper from the Apache lucene-solr project:
class TestIndexWriterExceptions, method testRandomExceptionDuringRollback.
/**
 * Verifies that {@code IndexWriter.rollback()} leaves the writer closed, the
 * write lock released, and the index readable at the last commit, even when a
 * random {@link FakeIOException} is injected during the internal rollback.
 */
public void testRandomExceptionDuringRollback() throws Exception {
  // fail in random places on i/o
  final int numIters = RANDOM_MULTIPLIER * 75;
  for (int iter = 0; iter < numIters; iter++) {
    MockDirectoryWrapper dir = newMockDirectory();
    // Install a failure hook: roughly 1 in 10 evaluations, and only when the
    // current stack shows we are inside IndexWriter.rollbackInternal, throw.
    dir.failOn(new MockDirectoryWrapper.Failure() {
      @Override
      public void eval(MockDirectoryWrapper dir) throws IOException {
        // Only consider failing 10% of the time so most I/O succeeds.
        if (random().nextInt(10) != 0) {
          return;
        }
        boolean maybeFail = false;
        // Scan the stack to restrict failures to the rollback code path.
        StackTraceElement[] trace = Thread.currentThread().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("rollbackInternal".equals(trace[i].getMethodName())) {
            maybeFail = true;
            break;
          }
        }
        if (maybeFail) {
          if (VERBOSE) {
            System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
            new Throwable().printStackTrace(System.out);
          }
          throw new FakeIOException();
        }
      }
    });
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    IndexWriter iw = new IndexWriter(dir, iwc);
    Document doc = new Document();
    // 10 docs make it into the commit; the 11th stays uncommitted and must be
    // discarded by rollback.
    for (int i = 0; i < 10; i++) {
      iw.addDocument(doc);
    }
    iw.commit();
    iw.addDocument(doc);
    // pool readers
    DirectoryReader r = DirectoryReader.open(iw);
    // sometimes sneak in a pending commit: we don't want to leak a file handle to that segments_N
    if (random().nextBoolean()) {
      iw.prepareCommit();
    }
    try {
      iw.rollback();
    } catch (FakeIOException expected) {
      // ok, we randomly hit exc here
    }
    r.close();
    // even though we hit exception: we are closed, no locks or files held, index in good state
    assertTrue(iw.isClosed());
    // Obtaining the write lock proves rollback released it despite the failure.
    dir.obtainLock(IndexWriter.WRITE_LOCK_NAME).close();
    r = DirectoryReader.open(dir);
    // Only the 10 committed docs survive; the uncommitted one was rolled back.
    assertEquals(10, r.maxDoc());
    r.close();
    // no leaks
    dir.close();
  }
}
Usage example of org.apache.lucene.store.MockDirectoryWrapper from the Apache lucene-solr project:
class TestForTooMuchCloning, method test.
// Verify that IndexInput instances are not cloned excessively,
// neither while merging nor while searching:
public void test() throws Exception {
  final MockDirectoryWrapper mockDir = newMockDirectory();
  final TieredMergePolicy mergePolicy = new TieredMergePolicy();
  mergePolicy.setMaxMergeAtOnce(2);
  final RandomIndexWriter writer = new RandomIndexWriter(random(), mockDir,
      newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(mergePolicy));
  final int docCount = 20;
  for (int i = 0; i < docCount; i++) {
    // Each document gets 100 random realistic terms.
    final StringBuilder text = new StringBuilder();
    for (int t = 0; t < 100; t++) {
      text.append(TestUtil.randomRealisticUnicodeString(random())).append(' ');
    }
    final Document doc = new Document();
    doc.add(new TextField("field", text.toString(), Field.Store.NO));
    writer.addDocument(doc);
  }
  final IndexReader reader = writer.getReader();
  writer.close();
  //System.out.println("merge clone count=" + cloneCount);
  assertTrue("too many calls to IndexInput.clone during merging: " + mockDir.getInputCloneCount(),
      mockDir.getInputCloneCount() < 500);
  final IndexSearcher searcher = newSearcher(reader);
  // important: set this after newSearcher, it might have run checkindex
  final int baselineCloneCount = mockDir.getInputCloneCount();
  // dir.setVerboseClone(true);
  // MTQ that matches all terms so the AUTO_REWRITE should
  // cutover to filter rewrite and reuse a single DocsEnum
  // across all terms;
  final TopDocs topDocs = searcher.search(
      new TermRangeQuery("field", new BytesRef(), new BytesRef(""), true, true), 10);
  assertTrue(topDocs.totalHits > 0);
  final int queryCloneCount = mockDir.getInputCloneCount() - baselineCloneCount;
  //System.out.println("query clone count=" + queryCloneCount);
  assertTrue("too many calls to IndexInput.clone during TermRangeQuery: " + queryCloneCount,
      queryCloneCount < 50);
  reader.close();
  mockDir.close();
}
Usage example of org.apache.lucene.store.MockDirectoryWrapper from the Apache lucene-solr project:
class TestIndexFileDeleter, method testSegmentsInflation.
// Trash segments_N files in the directory must push the next generation past them.
public void testSegmentsInflation() throws IOException {
  MockDirectoryWrapper mockDir = newMockDirectory();
  // TODO: allow falling back more than one commit
  mockDir.setCheckIndexOnClose(false);
  // write an empty commit; a fresh index starts at generation 1
  new IndexWriter(mockDir, new IndexWriterConfig(null)).close();
  SegmentInfos infos = SegmentInfos.readLatestCommit(mockDir);
  assertEquals(1, infos.getGeneration());
  // drop a trash commit file and verify the generation inflates past it
  mockDir.createOutput(IndexFileNames.SEGMENTS + "_2", IOContext.DEFAULT).close();
  inflateGens(infos, Arrays.asList(mockDir.listAll()), InfoStream.getDefault());
  assertEquals(2, infos.getGeneration());
  // a second trash commit inflates the generation again
  mockDir.createOutput(IndexFileNames.SEGMENTS + "_4", IOContext.DEFAULT).close();
  inflateGens(infos, Arrays.asList(mockDir.listAll()), InfoStream.getDefault());
  assertEquals(4, infos.getGeneration());
  mockDir.close();
}
Usage example of org.apache.lucene.store.MockDirectoryWrapper from the Apache lucene-solr project:
class TestIndexFileDeleter, method testTrashyGenFile.
// A trash file with a non-numeric generation suffix must not inflate the del gen.
public void testTrashyGenFile() throws IOException {
  MockDirectoryWrapper mockDir = newMockDirectory();
  // write the initial commit with a single document
  IndexWriter writer = new IndexWriter(mockDir, new IndexWriterConfig(null));
  writer.addDocument(new Document());
  writer.commit();
  writer.close();
  // no deletes yet, so the next del gen starts at 1
  SegmentInfos infos = SegmentInfos.readLatestCommit(mockDir);
  assertEquals(1, infos.info(0).getNextDelGen());
  // drop a trash file whose suffix is not a valid generation
  mockDir.createOutput("_1_A", IOContext.DEFAULT).close();
  // inflation must ignore it: the del gen stays at 1
  inflateGens(infos, Arrays.asList(mockDir.listAll()), InfoStream.getDefault());
  assertEquals(1, infos.info(0).getNextDelGen());
  mockDir.close();
}
Usage example of org.apache.lucene.store.MockDirectoryWrapper from the Apache lucene-solr project:
class TestIndexFileDeleter, method testTrashyFile.
// A bare "segments_" trash file (no generation digits) must not inflate the generation.
public void testTrashyFile() throws IOException {
  MockDirectoryWrapper mockDir = newMockDirectory();
  // TODO: maybe handle such trash better elsewhere...
  mockDir.setCheckIndexOnClose(false);
  // write an empty commit; a fresh index starts at generation 1
  new IndexWriter(mockDir, new IndexWriterConfig(null)).close();
  SegmentInfos infos = SegmentInfos.readLatestCommit(mockDir);
  assertEquals(1, infos.getGeneration());
  // drop a trash file that looks like a commit but has no generation suffix
  mockDir.createOutput(IndexFileNames.SEGMENTS + "_", IOContext.DEFAULT).close();
  // inflation must ignore it: the generation stays at 1
  inflateGens(infos, Arrays.asList(mockDir.listAll()), InfoStream.getDefault());
  assertEquals(1, infos.getGeneration());
  mockDir.close();
}
End of aggregated MockDirectoryWrapper usage examples.