Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache: class TestIndexWriterDelete, method doTestOperationsOnDiskFull.
/**
* Make sure if modifier tries to commit but hits disk full that modifier
* remains consistent and usable. Similar to TestIndexReader.testDiskFull().
*/
private void doTestOperationsOnDiskFull(boolean updates) throws IOException {
  Term searchTerm = new Term("content", "aaa");
  int START_COUNT = 157;
  int END_COUNT = 144;

  // First build up a starting index:
  MockDirectoryWrapper startDir = newMockDirectory();

  IndexWriter writer = new IndexWriter(startDir,
      newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
  for (int i = 0; i < START_COUNT; i++) {
    Document d = new Document();
    d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
    d.add(newTextField("content", "aaa " + i, Field.Store.NO));
    d.add(new NumericDocValuesField("dv", i));
    writer.addDocument(d);
  }
  writer.close();

  long diskUsage = startDir.sizeInBytes();
  long diskFree = diskUsage + 10;

  IOException err = null;
  boolean done = false;

  // Iterate w/ ever increasing free disk space:
  while (!done) {
    if (VERBOSE) {
      System.out.println("TEST: cycle");
    }
    MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(startDir));
    dir.setAllowRandomFileNotFoundException(false);
    IndexWriter modifier = new IndexWriter(dir,
        newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
            .setMaxBufferedDocs(1000)
            .setMaxBufferedDeleteTerms(1000)
            .setMergeScheduler(new ConcurrentMergeScheduler()));
    ((ConcurrentMergeScheduler) modifier.getConfig().getMergeScheduler()).setSuppressExceptions();

    // For each disk size, first try to commit against
    // dir that will hit random IOExceptions & disk
    // full; after, give it infinite disk space & turn
    // off random IOExceptions & retry w/ same reader:
    boolean success = false;
    for (int x = 0; x < 2; x++) {
      if (VERBOSE) {
        System.out.println("TEST: x=" + x);
      }

      double rate = 0.1;
      double diskRatio = ((double) diskFree) / diskUsage;
      long thisDiskFree;
      String testName;

      if (0 == x) {
        thisDiskFree = diskFree;
        if (diskRatio >= 2.0) {
          rate /= 2;
        }
        if (diskRatio >= 4.0) {
          rate /= 2;
        }
        if (diskRatio >= 6.0) {
          rate = 0.0;
        }
        if (VERBOSE) {
          System.out.println("\ncycle: " + diskFree + " bytes");
        }
        testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
        dir.setRandomIOExceptionRateOnOpen(random().nextDouble() * 0.01);
      } else {
        thisDiskFree = 0;
        rate = 0.0;
        if (VERBOSE) {
          System.out.println("\ncycle: same writer: unlimited disk space");
        }
        testName = "reader re-use after disk full";
        dir.setRandomIOExceptionRateOnOpen(0.0);
      }

      dir.setMaxSizeInBytes(thisDiskFree);
      dir.setRandomIOExceptionRate(rate);

      try {
        if (0 == x) {
          int docId = 12;
          for (int i = 0; i < 13; i++) {
            if (updates) {
              Document d = new Document();
              d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
              d.add(newTextField("content", "bbb " + i, Field.Store.NO));
              d.add(new NumericDocValuesField("dv", i));
              modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
            } else {
              // deletes
              modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
              // modifier.setNorm(docId, "contents", (float) 2.0);
            }
            docId += 12;
          }
          try {
            modifier.close();
          } catch (IllegalStateException ise) {
            // ok
            throw (IOException) ise.getCause();
          }
        }
        success = true;
        if (0 == x) {
          done = true;
        }
      } catch (IOException e) {
        if (VERBOSE) {
          System.out.println(" hit IOException: " + e);
          e.printStackTrace(System.out);
        }
        err = e;
        if (1 == x) {
          e.printStackTrace();
          fail(testName + " hit IOException after disk space was freed up");
        }
      }

      // prevent throwing a random exception here!!
      final double randomIOExceptionRate = dir.getRandomIOExceptionRate();
      final long maxSizeInBytes = dir.getMaxSizeInBytes();
      dir.setRandomIOExceptionRate(0.0);
      dir.setRandomIOExceptionRateOnOpen(0.0);
      dir.setMaxSizeInBytes(0);

      if (!success) {
        // open files which cause exc in MockRAMDir.close
        if (VERBOSE) {
          System.out.println("TEST: now rollback");
        }
        modifier.rollback();
      }

      // If the close() succeeded, make sure index is OK:
      if (success) {
        TestUtil.checkIndex(dir);
      }
      dir.setRandomIOExceptionRate(randomIOExceptionRate);
      dir.setMaxSizeInBytes(maxSizeInBytes);

      // Finally, verify index is not corrupt, and, if
      // we succeeded, we see all docs changed, and if
      // we failed, we see either all docs or no docs
      // changed (transactional semantics):
      IndexReader newReader = null;
      try {
        newReader = DirectoryReader.open(dir);
      } catch (IOException e) {
        e.printStackTrace();
        fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
      }

      IndexSearcher searcher = newSearcher(newReader);
      ScoreDoc[] hits = null;
      try {
        hits = searcher.search(new TermQuery(searchTerm), 1000).scoreDocs;
      } catch (IOException e) {
        e.printStackTrace();
        fail(testName + ": exception when searching: " + e);
      }
      int result2 = hits.length;
      if (success) {
        if (x == 0 && result2 != END_COUNT) {
          fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
        } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
          // It's possible that the first exception was
          // "recoverable" wrt pending deletes, in which
          // case the pending deletes are retained and
          // then re-flushing (with plenty of disk
          // space) will succeed in flushing the
          // deletes:
          fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
        }
      } else {
        // On hitting the exception we may still have added all docs:
        if (result2 != START_COUNT && result2 != END_COUNT) {
          err.printStackTrace();
          fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
        }
      }

      newReader.close();

      if (result2 == END_COUNT) {
        break;
      }
    }

    dir.close();

    // Try again with 10 more bytes of free space:
    diskFree += 10;
  }
  startDir.close();
}
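The loop above drives MockDirectoryWrapper's two fault knobs: setMaxSizeInBytes() caps the simulated disk (0 removes the cap, as the retry pass above relies on) and setRandomIOExceptionRate() injects random IOExceptions. Below is a minimal sketch of the same pattern, assuming it lives in a LuceneTestCase subclass so helpers such as newMockDirectory(), random(), newTextField() and newIndexWriterConfig() are in scope; the method name sketchDiskFull is hypothetical, not part of the test.

// Hypothetical sketch, not part of the test above: provoke a "disk full"
// IOException with a size cap, then lift the cap and roll back cleanly.
private void sketchDiskFull() throws IOException {
  MockDirectoryWrapper dir = newMockDirectory();
  dir.setMaxSizeInBytes(1024);       // simulate a nearly full disk
  dir.setRandomIOExceptionRate(0.1); // also inject random IOExceptions
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  boolean rolledBack = false;
  try {
    for (int i = 0; i < 1000; i++) {
      Document d = new Document();
      d.add(newTextField("content", "aaa " + i, Field.Store.NO));
      w.addDocument(d); // eventually hits the fake disk-full
    }
    w.commit();
  } catch (IOException expected) {
    // The writer must stay usable: disable injection, then roll back.
    dir.setRandomIOExceptionRate(0.0);
    dir.setMaxSizeInBytes(0); // 0 = unlimited, as in the retry pass above
    w.rollback();             // rollback also closes the writer
    rolledBack = true;
  }
  if (!rolledBack) {
    w.close();
  }
  dir.close();
}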
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache: class TestIndexWriterDelete, method testErrorAfterApplyDeletes.
// This test verifies that buffered deletes are cleared when
// an exception is hit during flush.
public void testErrorAfterApplyDeletes() throws IOException {
  MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() {
    boolean sawMaybe = false;
    boolean failed = false;
    Thread thread;

    @Override
    public MockDirectoryWrapper.Failure reset() {
      thread = Thread.currentThread();
      sawMaybe = false;
      failed = false;
      return this;
    }

    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (Thread.currentThread() != thread) {
        // don't fail during merging
        return;
      }
      if (sawMaybe && !failed) {
        boolean seen = false;
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("applyDeletesAndUpdates".equals(trace[i].getMethodName()) || "slowFileExists".equals(trace[i].getMethodName())) {
            seen = true;
            break;
          }
        }
        if (!seen) {
          // Only fail once we are no longer in applyDeletes
          failed = true;
          if (VERBOSE) {
            System.out.println("TEST: mock failure: now fail");
            new Throwable().printStackTrace(System.out);
          }
          throw new RuntimeException("fail after applyDeletes");
        }
      }
      if (!failed) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("applyDeletesAndUpdates".equals(trace[i].getMethodName())) {
            if (VERBOSE) {
              System.out.println("TEST: mock failure: saw applyDeletes");
              new Throwable().printStackTrace(System.out);
            }
            sawMaybe = true;
            break;
          }
        }
      }
    }
  };

  // create a couple of files
  String[] keywords = { "1", "2" };
  String[] unindexed = { "Netherlands", "Italy" };
  String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" };
  String[] text = { "Amsterdam", "Venice" };

  MockDirectoryWrapper dir = newMockDirectory();
  IndexWriter modifier = new IndexWriter(dir,
      newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))
          .setMaxBufferedDeleteTerms(2)
          .setReaderPooling(false)
          .setMergePolicy(newLogMergePolicy()));

  MergePolicy lmp = modifier.getConfig().getMergePolicy();
  lmp.setNoCFSRatio(1.0);

  dir.failOn(failure.reset());

  FieldType custom1 = new FieldType();
  custom1.setStored(true);
  for (int i = 0; i < keywords.length; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", keywords[i], Field.Store.YES));
    doc.add(newField("country", unindexed[i], custom1));
    doc.add(newTextField("contents", unstored[i], Field.Store.NO));
    doc.add(newTextField("city", text[i], Field.Store.YES));
    modifier.addDocument(doc);
  }

  if (VERBOSE) {
    System.out.println("TEST: now full merge");
  }
  modifier.forceMerge(1);

  if (VERBOSE) {
    System.out.println("TEST: now commit");
  }
  modifier.commit();

  // one of the two files hits
  Term term = new Term("city", "Amsterdam");
  int hitCount = getHitCount(dir, term);
  assertEquals(1, hitCount);

  if (VERBOSE) {
    System.out.println("TEST: delete term=" + term);
  }
  modifier.deleteDocuments(term);

  if (VERBOSE) {
    System.out.println("TEST: add empty doc");
  }
  Document doc = new Document();
  modifier.addDocument(doc);

  if (VERBOSE) {
    System.out.println("TEST: now commit for failure");
  }
  RuntimeException expected = expectThrows(RuntimeException.class, () -> {
    modifier.commit();
  });
  if (VERBOSE) {
    System.out.println("TEST: hit exc:");
    expected.printStackTrace(System.out);
  }

  // The commit above failed, so we need to retry it (which will
  // succeed, because the failure is a one-shot)
  boolean writerClosed;
  try {
    modifier.commit();
    writerClosed = false;
  } catch (IllegalStateException ise) {
    // The above exc struck during merge, and closed the writer
    writerClosed = true;
  }

  if (writerClosed == false) {
    hitCount = getHitCount(dir, term);

    // Make sure the delete was successfully flushed:
    assertEquals(0, hitCount);

    modifier.close();
  }
  dir.close();
}
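The anonymous Failure above is deliberately stateful: it arms itself when applyDeletesAndUpdates first appears on the stack (sawMaybe), then throws once that frame is gone, so the exception lands after deletes were applied but before the flush completes. A distilled, hypothetical variant of the same stack-inspection pattern (not part of the test) that fails exactly once, the first time a chosen method is on the current stack:

// Hypothetical one-shot Failure (for illustration only).
MockDirectoryWrapper.Failure failOnce = new MockDirectoryWrapper.Failure() {
  boolean failed = false;

  @Override
  public void eval(MockDirectoryWrapper dir) throws IOException {
    if (failed) {
      return; // one-shot: later calls succeed, so a retry can pass
    }
    for (StackTraceElement e : new Exception().getStackTrace()) {
      if ("applyDeletesAndUpdates".equals(e.getMethodName())) {
        failed = true;
        throw new IOException("injected failure during applyDeletesAndUpdates");
      }
    }
  }
};
// Installed the same way as in the test: dir.failOn(failOnce.reset());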
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache: class BasePointsFormatTestCase, method testWithExceptions.
/** Make sure we close open files, delete temp files, etc., on exception */
public void testWithExceptions() throws Exception {
  int numDocs = atLeast(10000);
  int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
  int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);

  byte[][][] docValues = new byte[numDocs][][];
  for (int docID = 0; docID < numDocs; docID++) {
    byte[][] values = new byte[numDims][];
    for (int dim = 0; dim < numDims; dim++) {
      values[dim] = new byte[numBytesPerDim];
      random().nextBytes(values[dim]);
    }
    docValues[docID] = values;
  }

  // Keep retrying until we 1) allow a big enough heap, and 2) hit a random IOExc from MDW:
  boolean done = false;
  while (done == false) {
    try (MockDirectoryWrapper dir = newMockFSDirectory(createTempDir())) {
      try {
        dir.setRandomIOExceptionRate(0.05);
        dir.setRandomIOExceptionRateOnOpen(0.05);
        verify(dir, docValues, null, numDims, numBytesPerDim, true);
      } catch (IllegalStateException ise) {
        done = handlePossiblyFakeException(ise);
      } catch (AssertionError ae) {
        if (ae.getMessage() != null && ae.getMessage().contains("does not exist; files=")) {
          // OK: likely we threw the random IOExc when IW was asserting the commit files exist
          done = true;
        } else {
          throw ae;
        }
      } catch (IllegalArgumentException iae) {
        // This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry w/ more heap
        assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
      } catch (IOException ioe) {
        done = handlePossiblyFakeException(ioe);
      }
    }
  }
}
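handlePossiblyFakeException is a private helper of BasePointsFormatTestCase that this page does not include. As a hedged reconstruction (the method name used here and the matched message text are assumptions, not the actual source), its job is to walk the cause chain, report success if the exception was injected by MockDirectoryWrapper, and propagate anything real:

// Hypothetical reconstruction; the actual helper lives elsewhere in
// BasePointsFormatTestCase and may differ in detail.
private boolean handlePossiblyFakeExceptionSketch(Exception e) {
  for (Throwable cause = e; cause != null; cause = cause.getCause()) {
    String m = cause.getMessage();
    // MockDirectoryWrapper's injected exceptions carry a recognizable
    // message (assumed here to contain "a random IOException"):
    if (m != null && m.contains("a random IOException")) {
      return true; // the fault we were fishing for: the test is done
    }
  }
  throw new RuntimeException(e); // a real failure: let it surface
}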
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache: class BaseSegmentInfoFormatTestCase, method testExceptionOnCreateOutput.
/**
* Test segment infos write that hits exception immediately on open.
 * Make sure we get our exception back, no file handle leaks, etc.
*/
public void testExceptionOnCreateOutput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "createOutput".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };

  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec,
      Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());

  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  });
  fail.clearDoFail();

  dir.close();
}
Use of org.apache.lucene.store.MockDirectoryWrapper in project lucene-solr by apache: class BaseSegmentInfoFormatTestCase, method testExceptionOnCloseOutput.
/**
* Test segment infos write that hits exception on close.
 * Make sure we get our exception back, no file handle leaks, etc.
*/
public void testExceptionOnCloseOutput() throws Exception {
  Failure fail = new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && "close".equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };

  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  SegmentInfo info = new SegmentInfo(dir, getVersions()[0], getVersions()[0], "_123", 1, false, codec,
      Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());

  fail.setDoFail();
  expectThrows(FakeIOException.class, () -> {
    codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  });
  fail.clearDoFail();

  dir.close();
}
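testExceptionOnCreateOutput and testExceptionOnCloseOutput are identical except for the method name the Failure matches ("createOutput" vs. "close"). A hypothetical factory, not in the Lucene source, that captures the shared pattern under the same imports the two tests use:

// Hypothetical helper: a Failure that throws FakeIOException whenever the
// given method is on the stack while doFail is set.
static Failure failInMethod(final String methodName) {
  return new Failure() {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
        if (doFail && methodName.equals(e.getMethodName())) {
          throw new FakeIOException();
        }
      }
    }
  };
}
// Usage mirrors the tests above:
//   Failure fail = failInMethod("createOutput");
//   dir.failOn(fail);
//   fail.setDoFail(); ... fail.clearDoFail();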