Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
The class TestIndexWriterOnVMError, method testUnknownError.
public void testUnknownError() throws Exception {
  final Random r = new Random(random().nextLong());
  doTest(new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (r.nextInt(3000) == 0) {
        StackTraceElement[] stack = Thread.currentThread().getStackTrace();
        boolean ok = false;
        for (int i = 0; i < stack.length; i++) {
          if (stack[i].getClassName().equals(IndexWriter.class.getName())) {
            ok = true;
          }
        }
        if (ok) {
          throw new UnknownError("Fake UnknownError");
        }
      }
    }
  });
}
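The doTest helper is defined elsewhere in TestIndexWriterOnVMError and is not shown above. As a rough, hypothetical sketch of how such a harness might wire a Failure into a MockDirectoryWrapper (the helper body, loop, and cleanup are assumptions, not the actual implementation):

// Hypothetical harness sketch only; the real doTest() in TestIndexWriterOnVMError
// also restarts the writer and verifies that the index is still usable afterwards.
private void doTest(MockDirectoryWrapper.Failure failure) throws Exception {
  MockDirectoryWrapper dir = newMockDirectory();   // LuceneTestCase helper
  dir.failOn(failure);                             // install the fault injector
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  try {
    for (int i = 0; i < 1000; i++) {
      w.addDocument(new Document());               // any of these calls may hit the injected error
    }
    w.close();
  } catch (Error expected) {
    // the injected "Fake UnknownError" surfaces here; a real harness would
    // recover and then check that the index is still readable
  } finally {
    IOUtils.closeWhileHandlingException(w, dir);
  }
}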
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
The class TestIndexWriterReader, method testNRTOpenExceptions.
@Test
public void testNRTOpenExceptions() throws Exception {
  // LUCENE-5262: test that several failed attempts to obtain an NRT reader
  // don't leak file handles.
  MockDirectoryWrapper dir = (MockDirectoryWrapper) getAssertNoDeletesDirectory(newMockDirectory());
  final AtomicBoolean shouldFail = new AtomicBoolean();
  dir.failOn(new MockDirectoryWrapper.Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      StackTraceElement[] trace = new Exception().getStackTrace();
      if (shouldFail.get()) {
        for (int i = 0; i < trace.length; i++) {
          if ("getReadOnlyClone".equals(trace[i].getMethodName())) {
            if (VERBOSE) {
              System.out.println("TEST: now fail; exc:");
              new Throwable().printStackTrace(System.out);
            }
            shouldFail.set(false);
            throw new FakeIOException();
          }
        }
      }
    }
  });
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // prevent merges from getting in the way
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  IndexWriter writer = new IndexWriter(dir, conf);
  // create a segment and open an NRT reader
  writer.addDocument(new Document());
  writer.getReader().close();
  // add a new document so a new NRT reader is required
  writer.addDocument(new Document());
  // each attempt to open the new NRT reader should hit the injected
  // FakeIOException without leaking file handles
  for (int i = 0; i < 2; i++) {
    shouldFail.set(true);
    expectThrows(FakeIOException.class, () -> {
      writer.getReader().close();
    });
  }
  writer.close();
  dir.close();
}
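Note that the "don't leak file handles" assertion is enforced by MockDirectoryWrapper itself: when dir.close() runs at the end of the test, the wrapper fails the test if any inputs or outputs opened through it are still open, so a leaked NRT reader from one of the failed getReader() calls would be reported there.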
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
The class TestIndexWriterExceptions, method testExceptionDuringSync.
// LUCENE-1044: test exception during sync
public void testExceptionDuringSync() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  FailOnlyInSync failure = new FailOnlyInSync();
  dir.failOn(failure);
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setMergeScheduler(new ConcurrentMergeScheduler())
      .setMergePolicy(newLogMergePolicy(5)));
  failure.setDoFail();
  for (int i = 0; i < 23; i++) {
    addDoc(writer);
    if ((i - 1) % 2 == 0) {
      try {
        writer.commit();
      } catch (IOException ioe) {
        // expected
      }
    }
  }
  ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
  assertTrue(failure.didFail);
  failure.clearDoFail();
  writer.close();
  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(23, reader.numDocs());
  reader.close();
  dir.close();
}
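FailOnlyInSync and addDoc are helpers defined elsewhere in TestIndexWriterExceptions; addDoc simply adds a small one-field document. A rough sketch of what a sync-only failure can look like, following the stack-inspection pattern of the other Failure implementations on this page (the real class may differ in detail):

// Sketch of a Failure that trips only while MockDirectoryWrapper.sync() is on the
// call stack; the actual FailOnlyInSync in TestIndexWriterExceptions may differ.
private static class FailOnlyInSync extends MockDirectoryWrapper.Failure {
  boolean didFail;

  @Override
  public void eval(MockDirectoryWrapper dir) throws IOException {
    if (doFail) {  // doFail is toggled via setDoFail()/clearDoFail() inherited from Failure
      for (StackTraceElement frame : new Exception().getStackTrace()) {
        if (MockDirectoryWrapper.class.getName().equals(frame.getClassName())
            && "sync".equals(frame.getMethodName())) {
          didFail = true;
          throw new IOException("now failing on purpose during sync");
        }
      }
    }
  }
}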
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
The class TestIndexWriterOnDiskFull, method testAddIndexOnDiskFull.
// TODO: make @Nightly variant that provokes more disk
// fulls
// TODO: have test fail if on any given top
// iter there was not a single IOE hit
/*
Test: make sure when we run out of disk space or hit
random IOExceptions in any of the addIndexes(*) calls
that 1) index is not corrupt (searcher can open/search
it) and 2) transactional semantics are followed:
either all or none of the incoming documents were in
fact added.
*/
public void testAddIndexOnDiskFull() throws IOException {
  // MemoryCodec, since it uses FST, is not necessarily
  // "additive", ie if you add up N small FSTs, then merge
  // them, the merged result can easily be larger than the
  // sum because the merged FST may use array encoding for
  // some arcs (which uses more space):
  final String idFormat = TestUtil.getPostingsFormat("id");
  final String contentFormat = TestUtil.getPostingsFormat("content");
  assumeFalse("This test cannot run with Memory codec", idFormat.equals("Memory") || contentFormat.equals("Memory"));
  int START_COUNT = 57;
  int NUM_DIR = TEST_NIGHTLY ? 50 : 5;
  int END_COUNT = START_COUNT + NUM_DIR * (TEST_NIGHTLY ? 25 : 5);
  // Build up a bunch of dirs that have indexes which we
  // will then merge together by calling addIndexes(*):
  Directory[] dirs = new Directory[NUM_DIR];
  long inputDiskUsage = 0;
  for (int i = 0; i < NUM_DIR; i++) {
    dirs[i] = newDirectory();
    IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig(new MockAnalyzer(random())));
    for (int j = 0; j < 25; j++) {
      addDocWithIndex(writer, 25 * i + j);
    }
    writer.close();
    String[] files = dirs[i].listAll();
    for (int j = 0; j < files.length; j++) {
      inputDiskUsage += dirs[i].fileLength(files[j]);
    }
  }
  // Now, build a starting index that has START_COUNT docs. We
  // will then try to addIndexes into a copy of this:
  MockDirectoryWrapper startDir = newMockDirectory();
  IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(new MockAnalyzer(random())));
  for (int j = 0; j < START_COUNT; j++) {
    addDocWithIndex(writer, j);
  }
  writer.close();
  // Make sure starting index seems to be working properly:
  Term searchTerm = new Term("content", "aaa");
  IndexReader reader = DirectoryReader.open(startDir);
  assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
  IndexSearcher searcher = newSearcher(reader);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), 1000).scoreDocs;
  assertEquals("first number of hits", 57, hits.length);
  reader.close();
  // Iterate with larger and larger amounts of free
  // disk space. With little free disk space,
  // addIndexes will certainly run out of space &
  // fail. Verify that when this happens, index is
  // not corrupt and index in fact has added no
  // documents. Then, we increase the amount of free
  // disk space on each iteration. At some point there is
  // enough free disk space and addIndexes should
  // succeed and index should show all documents were
  // added.
  // String[] files = startDir.listAll();
  long diskUsage = startDir.sizeInBytes();
  long startDiskUsage = 0;
  String[] files = startDir.listAll();
  for (int i = 0; i < files.length; i++) {
    startDiskUsage += startDir.fileLength(files[i]);
  }
  for (int iter = 0; iter < 3; iter++) {
    if (VERBOSE) {
      System.out.println("TEST: iter=" + iter);
    }
    // Start with slightly (50-200 bytes) more free space than we are currently using:
    long diskFree = diskUsage + TestUtil.nextInt(random(), 50, 200);
    int method = iter;
    boolean success = false;
    boolean done = false;
    String methodName;
    if (0 == method) {
      methodName = "addIndexes(Directory[]) + forceMerge(1)";
    } else if (1 == method) {
      methodName = "addIndexes(IndexReader[])";
    } else {
      methodName = "addIndexes(Directory[])";
    }
    while (!done) {
      if (VERBOSE) {
        System.out.println("TEST: cycle...");
      }
      // Make a new dir that will enforce disk usage:
      MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(startDir));
      IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy(false));
      writer = new IndexWriter(dir, iwc);
      Exception err = null;
      for (int x = 0; x < 2; x++) {
        MergeScheduler ms = writer.getConfig().getMergeScheduler();
        if (ms instanceof ConcurrentMergeScheduler) {
          // This test intentionally produces exceptions in the threads that
          // CMS launches; we don't want to pollute test output with these.
          if (0 == x) {
            ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
          } else {
            ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
          }
        }
        // Two loops: first time, limit disk space &
        // throw random IOExceptions; second time, no
        // disk space limit:
        double rate = 0.05;
        double diskRatio = ((double) diskFree) / diskUsage;
        long thisDiskFree;
        String testName = null;
        if (0 == x) {
          dir.setRandomIOExceptionRateOnOpen(random().nextDouble() * 0.01);
          thisDiskFree = diskFree;
          if (diskRatio >= 2.0) {
            rate /= 2;
          }
          if (diskRatio >= 4.0) {
            rate /= 2;
          }
          if (diskRatio >= 6.0) {
            rate = 0.0;
          }
          if (VERBOSE) {
            testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
          }
        } else {
          dir.setRandomIOExceptionRateOnOpen(0.0);
          thisDiskFree = 0;
          rate = 0.0;
          if (VERBOSE) {
            testName = "disk full test " + methodName + " with unlimited disk space";
          }
        }
        if (VERBOSE) {
          System.out.println("\ncycle: " + testName);
        }
        dir.setTrackDiskUsage(true);
        dir.setMaxSizeInBytes(thisDiskFree);
        dir.setRandomIOExceptionRate(rate);
        try {
          if (0 == method) {
            if (VERBOSE) {
              System.out.println("TEST: now addIndexes count=" + dirs.length);
            }
            writer.addIndexes(dirs);
            if (VERBOSE) {
              System.out.println("TEST: now forceMerge");
            }
            writer.forceMerge(1);
          } else if (1 == method) {
            DirectoryReader[] readers = new DirectoryReader[dirs.length];
            for (int i = 0; i < dirs.length; i++) {
              readers[i] = DirectoryReader.open(dirs[i]);
            }
            try {
              TestUtil.addIndexesSlowly(writer, readers);
            } finally {
              for (int i = 0; i < dirs.length; i++) {
                readers[i].close();
              }
            }
          } else {
            writer.addIndexes(dirs);
          }
          success = true;
          if (VERBOSE) {
            System.out.println(" success!");
          }
          if (0 == x) {
            done = true;
          }
        } catch (IllegalStateException | IOException e) {
          success = false;
          err = e;
          if (VERBOSE) {
            System.out.println(" hit Exception: " + e);
            e.printStackTrace(System.out);
          }
          if (1 == x) {
            e.printStackTrace(System.out);
            fail(methodName + " hit IOException after disk space was freed up");
          }
        }
        if (x == 1) {
          // Make sure all threads from ConcurrentMergeScheduler are done
          TestUtil.syncConcurrentMerges(writer);
        } else {
          dir.setRandomIOExceptionRateOnOpen(0.0);
          writer.rollback();
          writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy(false)));
        }
        if (VERBOSE) {
          System.out.println(" now test readers");
        }
        // Finally, verify index is not corrupt, and, if
        // we succeeded, we see all docs added, and if we
        // failed, we see either all docs or no docs added
        // (transactional semantics):
        dir.setRandomIOExceptionRateOnOpen(0.0);
        try {
          reader = DirectoryReader.open(dir);
        } catch (IOException e) {
          e.printStackTrace(System.out);
          fail(testName + ": exception when creating IndexReader: " + e);
        }
        int result = reader.docFreq(searchTerm);
        if (success) {
          if (result != START_COUNT) {
            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
          }
        } else {
          // On hitting an exception we still may have added all docs:
          if (result != START_COUNT && result != END_COUNT) {
            err.printStackTrace(System.out);
            fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
          }
        }
        searcher = newSearcher(reader);
        try {
          hits = searcher.search(new TermQuery(searchTerm), END_COUNT).scoreDocs;
        } catch (IOException e) {
          e.printStackTrace(System.out);
          fail(testName + ": exception when searching: " + e);
        }
        int result2 = hits.length;
        if (success) {
          if (result2 != result) {
            fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
          }
        } else {
          // On hitting an exception we still may have added all docs:
          if (result2 != result) {
            err.printStackTrace(System.out);
            fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
          }
        }
        reader.close();
        if (VERBOSE) {
          System.out.println(" count is " + result);
        }
        if (done || result == END_COUNT) {
          break;
        }
      }
      if (VERBOSE) {
        System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
      }
      if (done) {
        // Javadocs state that temp free Directory space
        // required is at most 2X total input size of
        // indices so let's make sure:
        assertTrue("max free Directory space required exceeded 2X the total input index sizes during " + methodName
            + ": max temp usage = " + (dir.getMaxUsedSizeInBytes() - startDiskUsage) + " bytes vs limit=" + (2 * (startDiskUsage + inputDiskUsage))
            + "; starting disk usage = " + startDiskUsage + " bytes; "
            + "input index disk usage = " + inputDiskUsage + " bytes",
            (dir.getMaxUsedSizeInBytes() - startDiskUsage) < 2 * (startDiskUsage + inputDiskUsage));
      }
      // Make sure we don't hit disk full during close below:
      dir.setMaxSizeInBytes(0);
      dir.setRandomIOExceptionRate(0.0);
      dir.setRandomIOExceptionRateOnOpen(0.0);
      writer.close();
      dir.close();
      // Try again with more free space:
      diskFree += TEST_NIGHTLY ? TestUtil.nextInt(random(), 4000, 8000) : TestUtil.nextInt(random(), 40000, 80000);
    }
  }
  startDir.close();
  for (Directory dir : dirs) dir.close();
}
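addDocWithIndex is a helper in TestIndexWriterOnDiskFull that is not reproduced above. Given that the test searches the content field for the term "aaa", a plausible sketch (the exact field contents are assumptions):

// Plausible sketch of the addDocWithIndex helper; the real implementation
// in TestIndexWriterOnDiskFull may differ in detail.
private void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  Document doc = new Document();
  doc.add(newTextField("content", "aaa " + index, Field.Store.YES)); // every doc matches content:aaa
  doc.add(newTextField("id", Integer.toString(index), Field.Store.YES));
  writer.addDocument(doc);
}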
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache.
The class TestIndexWriterMaxDocs, method testAddTooManyIndexesDir.
/**
 * LUCENE-6299: Test if addIndexes(Dir[]) prevents exceeding max docs.
 */
public void testAddTooManyIndexesDir() throws Exception {
  // we cheat and add the same one over again... IW wants a write lock on each
  Directory dir = newDirectory(random(), NoLockFactory.INSTANCE);
  Document doc = new Document();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  for (int i = 0; i < 100000; i++) {
    w.addDocument(doc);
  }
  w.forceMerge(1);
  w.commit();
  w.close();
  // wrap this with disk full, so test fails faster and doesn't fill up real disks.
  MockDirectoryWrapper dir2 = newMockDirectory();
  w = new IndexWriter(dir2, new IndexWriterConfig(null));
  // don't confuse checkindex
  w.commit();
  // 64KB
  dir2.setMaxSizeInBytes(dir2.sizeInBytes() + 65536);
  Directory[] dirs = new Directory[1 + (IndexWriter.MAX_DOCS / 100000)];
  for (int i = 0; i < dirs.length; i++) {
    // bypass iw check for duplicate dirs
    dirs[i] = new FilterDirectory(dir) {
    };
  }
  try {
    w.addIndexes(dirs);
    fail("didn't get expected exception");
  } catch (IllegalArgumentException expected) {
    // pass
  } catch (IOException fakeDiskFull) {
    final Exception e;
    if (fakeDiskFull.getMessage() != null && fakeDiskFull.getMessage().startsWith("fake disk full")) {
      e = new RuntimeException("test failed: IW checks aren't working and we are executing addIndexes");
      e.addSuppressed(fakeDiskFull);
    } else {
      e = fakeDiskFull;
    }
    throw e;
  }
  w.close();
  dir.close();
  dir2.close();
}