Use of org.apache.lucene.store.AlreadyClosedException in project elasticsearch by elastic.
Class ExceptionSerializationTests, method testWriteThrowable:
public void testWriteThrowable() throws IOException {
final QueryShardException queryShardException = new QueryShardException(new Index("foo", "_na_"), "foobar", null);
final UnknownException unknownException = new UnknownException("this exception is unknown", queryShardException);
final Exception[] causes = new Exception[] {
    new IllegalStateException("foobar"),
    new IllegalArgumentException("alalaal"),
    new NullPointerException("boom"),
    new EOFException("dadada"),
    new ElasticsearchSecurityException("nono!"),
    new NumberFormatException("not a number"),
    new CorruptIndexException("baaaam booom", "this is my resource"),
    new IndexFormatTooNewException("tooo new", 1, 2, 3),
    new IndexFormatTooOldException("tooo new", 1, 2, 3),
    new IndexFormatTooOldException("tooo new", "very old version"),
    new ArrayIndexOutOfBoundsException("booom"),
    new StringIndexOutOfBoundsException("booom"),
    new FileNotFoundException("booom"),
    new NoSuchFileException("booom"),
    new AlreadyClosedException("closed!!", new NullPointerException()),
    new LockObtainFailedException("can't lock directory", new NullPointerException()),
    unknownException };
for (final Exception cause : causes) {
ElasticsearchException ex = new ElasticsearchException("topLevel", cause);
ElasticsearchException deserialized = serialize(ex);
assertEquals(deserialized.getMessage(), ex.getMessage());
assertTrue("Expected: " + deserialized.getCause().getMessage() + " to contain: " + ex.getCause().getClass().getName() + " but it didn't", deserialized.getCause().getMessage().contains(ex.getCause().getMessage()));
if (ex.getCause().getClass() != UnknownException.class) {
// unknown exception is not directly mapped
assertEquals(deserialized.getCause().getClass(), ex.getCause().getClass());
} else {
assertEquals(deserialized.getCause().getClass(), NotSerializableExceptionWrapper.class);
}
assertArrayEquals(deserialized.getStackTrace(), ex.getStackTrace());
assertTrue(deserialized.getStackTrace().length > 1);
assertVersionSerializable(VersionUtils.randomVersion(random()), cause);
assertVersionSerializable(VersionUtils.randomVersion(random()), ex);
assertVersionSerializable(VersionUtils.randomVersion(random()), deserialized);
}
}
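The serialize(...) helper used above is not shown on this page; a minimal sketch of an equivalent round trip, assuming Elasticsearch's stream API (org.elasticsearch.common.io.stream.BytesStreamOutput / StreamInput with writeException and readException), could look like this:

// Sketch only: the test's serialize(...) helper is assumed to be roughly equivalent to this.
static Exception roundTrip(Exception original) throws IOException {
    BytesStreamOutput out = new BytesStreamOutput();
    out.writeException(original);
    try (StreamInput in = out.bytes().streamInput()) {
        return in.readException();
    }
}

A known exception type such as AlreadyClosedException deserializes back to the same class, while an unregistered type like UnknownException comes back as a NotSerializableExceptionWrapper, which is exactly what the assertions above check.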
Use of org.apache.lucene.store.AlreadyClosedException in project elasticsearch by elastic.
Class RefCountedTests, method testMultiThreaded:
public void testMultiThreaded() throws InterruptedException {
final MyRefCounted counted = new MyRefCounted();
Thread[] threads = new Thread[randomIntBetween(2, 5)];
final CountDownLatch latch = new CountDownLatch(1);
final CopyOnWriteArrayList<Exception> exceptions = new CopyOnWriteArrayList<>();
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
latch.await();
for (int j = 0; j < 10000; j++) {
counted.incRef();
try {
counted.ensureOpen();
} finally {
counted.decRef();
}
}
} catch (Exception e) {
exceptions.add(e);
}
}
};
threads[i].start();
}
latch.countDown();
for (int i = 0; i < threads.length; i++) {
threads[i].join();
}
counted.decRef();
try {
counted.ensureOpen();
fail("expected to be closed");
} catch (AlreadyClosedException ex) {
assertThat(ex.getMessage(), equalTo("closed"));
}
assertThat(counted.refCount(), is(0));
assertThat(exceptions, Matchers.emptyIterable());
}
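MyRefCounted is a small test fixture that is not shown here. For the assertions above to hold, its ensureOpen() must throw an AlreadyClosedException with the message "closed" once the last reference has been released. A minimal sketch, assuming it builds on Elasticsearch's AbstractRefCounted (org.elasticsearch.common.util.concurrent):

// Sketch only: the real fixture in RefCountedTests may differ.
static final class MyRefCounted extends AbstractRefCounted {
    MyRefCounted() {
        super("my_ref_counted");
    }

    @Override
    protected void closeInternal() {
        // release any underlying resources here
    }

    void ensureOpen() {
        if (refCount() <= 0) {
            throw new AlreadyClosedException("closed"); // message asserted by the test above
        }
    }
}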
Use of org.apache.lucene.store.AlreadyClosedException in project geode by apache.
Class LuceneEventListener, method process:
protected boolean process(final List<AsyncEvent> events) {
// Try to get a PDX instance if possible, rather than a deserialized object
DefaultQuery.setPdxReadSerialized(true);
Set<IndexRepository> affectedRepos = new HashSet<IndexRepository>();
try {
for (AsyncEvent event : events) {
Region region = event.getRegion();
Object key = event.getKey();
Object callbackArgument = event.getCallbackArgument();
IndexRepository repository = repositoryManager.getRepository(region, key, callbackArgument);
Object value = getValue(region.getEntry(key));
if (value != null) {
repository.update(key, value);
} else {
repository.delete(key);
}
affectedRepos.add(repository);
}
for (IndexRepository repo : affectedRepos) {
repo.commit();
}
return true;
} catch (BucketNotFoundException | RegionDestroyedException | PrimaryBucketException e) {
logger.debug("Bucket not found while saving to lucene index: " + e.getMessage(), e);
return false;
} catch (CacheClosedException e) {
logger.debug("Unable to save to lucene index, cache has been closed", e);
return false;
} catch (AlreadyClosedException e) {
logger.debug("Unable to commit, the lucene index is already closed", e);
return false;
} catch (IOException e) {
throw new InternalGemFireError("Unable to save to lucene index", e);
} finally {
DefaultQuery.setPdxReadSerialized(false);
}
}
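The boolean return value carries retry semantics here: returning false leaves the batch on the async event queue to be redelivered later, which is why a transient AlreadyClosedException (or a closed cache / missing bucket) is logged and answered with false, while a genuine IOException is escalated as a fatal error. A sketch of the assumed wiring (illustrative, not verbatim Geode source):

// Assumed entry point: the AsyncEventListener delegates straight to process(...), so
// "return false" on AlreadyClosedException means "keep the batch queued and retry later".
@Override
public boolean processEvents(List<AsyncEvent> events) {
    return process(events);
}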
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class TestIndexWriterExceptions, method testMergeExceptionIsTragic:
public void testMergeExceptionIsTragic() throws Exception {
MockDirectoryWrapper dir = newMockDirectory();
final AtomicBoolean didFail = new AtomicBoolean();
dir.failOn(new MockDirectoryWrapper.Failure() {
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
if (random().nextInt(10) != 0) {
return;
}
if (didFail.get()) {
// Already failed
return;
}
StackTraceElement[] trace = Thread.currentThread().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("merge".equals(trace[i].getMethodName())) {
if (VERBOSE) {
System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
new Throwable().printStackTrace(System.out);
}
didFail.set(true);
throw new FakeIOException();
}
}
}
});
IndexWriterConfig iwc = newIndexWriterConfig();
MergePolicy mp = iwc.getMergePolicy();
if (mp instanceof TieredMergePolicy) {
TieredMergePolicy tmp = (TieredMergePolicy) mp;
if (tmp.getMaxMergedSegmentMB() < 0.2) {
tmp.setMaxMergedSegmentMB(0.2);
}
}
MergeScheduler ms = iwc.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
}
IndexWriter w = new IndexWriter(dir, iwc);
while (true) {
try {
Document doc = new Document();
doc.add(newStringField("field", "string", Field.Store.NO));
w.addDocument(doc);
if (random().nextInt(10) == 7) {
// Flush new segment:
DirectoryReader.open(w).close();
}
} catch (AlreadyClosedException ace) {
// OK: e.g. CMS hit the exc in BG thread and closed the writer
break;
} catch (FakeIOException fioe) {
// OK: e.g. SMS hit the exception
break;
}
}
assertNotNull(w.getTragicException());
assertFalse(w.isOpen());
assertTrue(didFail.get());
if (ms instanceof ConcurrentMergeScheduler) {
// Sneaky: CMS's merge thread will be concurrently rolling back IW due
// to the tragedy, with this main thread, so we have to wait here
// to ensure the rollback has finished, else MDW still sees open files:
((ConcurrentMergeScheduler) ms).sync();
}
dir.close();
}
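The behavior the test exercises is also the one applications can rely on: when a background merge hits an unrecoverable error, IndexWriter closes itself (a "tragic" close), subsequent calls fail with AlreadyClosedException, and getTragicException() exposes the root cause. A minimal sketch of handling this outside a test (handleFatal is a hypothetical application callback, not Lucene API):

void addSafely(IndexWriter writer, Document doc) throws IOException {
    try {
        writer.addDocument(doc);
    } catch (AlreadyClosedException ace) {
        Throwable tragedy = writer.getTragicException();
        if (tragedy != null) {
            handleFatal(tragedy); // writer closed itself after an unrecoverable error, e.g. a failed merge
        } else {
            throw ace; // writer was closed deliberately by another caller
        }
    }
}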
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class TestIndexWriterExceptions, method testNoLostDeletesOrUpdates:
// Make sure that if we hit a transient IOException (e.g. disk full),
// and the failure then stops (e.g. disk frees up) so that we can
// successfully close IW or open an NRT reader, we don't lose any
// deletes or updates:
public void testNoLostDeletesOrUpdates() throws Throwable {
int deleteCount = 0;
int docBase = 0;
int docCount = 0;
MockDirectoryWrapper dir = newMockDirectory();
final AtomicBoolean shouldFail = new AtomicBoolean();
dir.failOn(new MockDirectoryWrapper.Failure() {
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
if (shouldFail.get() == false) {
// failure injection is not armed yet (shouldFail is only set to true around the delete/update phase below):
return;
}
if (random().nextInt(3) != 2) {
return;
}
StackTraceElement[] trace = Thread.currentThread().getStackTrace();
boolean sawSeal = false;
boolean sawWrite = false;
for (int i = 0; i < trace.length; i++) {
if ("sealFlushedSegment".equals(trace[i].getMethodName())) {
sawSeal = true;
break;
}
if ("writeLiveDocs".equals(trace[i].getMethodName()) || "writeFieldUpdates".equals(trace[i].getMethodName())) {
sawWrite = true;
}
}
// only fail while live docs / doc values updates are being written, not once sealFlushedSegment has started, else the whole segment is aborted and docs are lost:
if (sawWrite && sawSeal == false) {
if (VERBOSE) {
System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
new Throwable().printStackTrace(System.out);
}
shouldFail.set(false);
throw new FakeIOException();
}
}
});
RandomIndexWriter w = null;
boolean tragic = false;
for (int iter = 0; iter < 10 * RANDOM_MULTIPLIER; iter++) {
int numDocs = atLeast(100);
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter + " numDocs=" + numDocs + " docBase=" + docBase + " delCount=" + deleteCount);
}
if (w == null) {
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
w = new RandomIndexWriter(random(), dir, iwc);
// Since we hit exc during merging, a partial
// forceMerge can easily return when there are still
// too many segments in the index:
w.setDoRandomForceMergeAssert(false);
}
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new StringField("id", "" + (docBase + i), Field.Store.NO));
doc.add(new NumericDocValuesField("f", 1L));
doc.add(new NumericDocValuesField("cf", 2L));
doc.add(new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(1L)));
doc.add(new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(2L)));
w.addDocument(doc);
}
docCount += numDocs;
// TODO: we could make the test more evil, by letting
// it throw more than one exc, randomly, before "recovering"
// TODO: we could also install an infoStream and try
// to fail in "more evil" places inside BDS
shouldFail.set(true);
boolean doClose = false;
try {
for (int i = 0; i < numDocs; i++) {
if (random().nextInt(10) == 7) {
boolean fieldUpdate = random().nextBoolean();
int docid = docBase + i;
if (fieldUpdate) {
long value = iter;
if (VERBOSE) {
System.out.println(" update id=" + docid + " to value " + value);
}
Term idTerm = new Term("id", Integer.toString(docid));
if (random().nextBoolean()) {
// update only numeric field
w.updateDocValues(idTerm, new NumericDocValuesField("f", value), new NumericDocValuesField("cf", value * 2));
} else if (random().nextBoolean()) {
w.updateDocValues(idTerm, new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(value)), new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(value * 2)));
} else {
w.updateDocValues(idTerm, new NumericDocValuesField("f", value), new NumericDocValuesField("cf", value * 2), new BinaryDocValuesField("bf", TestBinaryDocValuesUpdates.toBytes(value)), new BinaryDocValuesField("bcf", TestBinaryDocValuesUpdates.toBytes(value * 2)));
}
}
// sometimes do both deletes and updates
if (!fieldUpdate || random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" delete id=" + docid);
}
deleteCount++;
w.deleteDocuments(new Term("id", "" + docid));
}
}
}
// Trigger writeLiveDocs + writeFieldUpdates so we hit fake exc:
IndexReader r = w.getReader();
// Sometimes we will make it here (we only randomly
// throw the exc):
assertEquals(docCount - deleteCount, r.numDocs());
r.close();
// Sometimes close, so the disk full happens on close:
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" now close writer");
}
doClose = true;
w.commit();
w.close();
w = null;
}
} catch (Throwable t) {
// the injected FakeIOException may arrive directly or wrapped as the cause of another exception (e.g. the AlreadyClosedException thrown after a tragic failure), so don't fail in either case:
if (t instanceof FakeIOException || (t.getCause() instanceof FakeIOException)) {
// expected
if (VERBOSE) {
System.out.println("TEST: hit expected IOE");
}
if (t instanceof AlreadyClosedException) {
// FakeIOExc struck during merge and writer is now closed:
w = null;
tragic = true;
}
} else {
throw t;
}
}
shouldFail.set(false);
if (w != null) {
MergeScheduler ms = w.w.getConfig().getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
((ConcurrentMergeScheduler) ms).sync();
}
if (w.w.getTragicException() != null) {
// Tragic exc in CMS closed the writer
w = null;
}
}
IndexReader r;
if (doClose && w != null) {
if (VERBOSE) {
System.out.println(" now 2nd close writer");
}
w.close();
w = null;
}
if (w == null || random().nextBoolean()) {
// disk" bits are good:
if (VERBOSE) {
System.out.println("TEST: verify against non-NRT reader");
}
if (w != null) {
w.commit();
}
r = DirectoryReader.open(dir);
} else {
if (VERBOSE) {
System.out.println("TEST: verify against NRT reader");
}
r = w.getReader();
}
if (tragic == false) {
assertEquals(docCount - deleteCount, r.numDocs());
}
BytesRef scratch = new BytesRef();
for (LeafReaderContext context : r.leaves()) {
LeafReader reader = context.reader();
Bits liveDocs = reader.getLiveDocs();
NumericDocValues f = reader.getNumericDocValues("f");
NumericDocValues cf = reader.getNumericDocValues("cf");
BinaryDocValues bf = reader.getBinaryDocValues("bf");
BinaryDocValues bcf = reader.getBinaryDocValues("bcf");
for (int i = 0; i < reader.maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
assertEquals(i, f.advance(i));
assertEquals(i, cf.advance(i));
assertEquals(i, bf.advance(i));
assertEquals(i, bcf.advance(i));
assertEquals("doc=" + (docBase + i), cf.longValue(), f.longValue() * 2);
assertEquals("doc=" + (docBase + i), TestBinaryDocValuesUpdates.getValue(bcf), TestBinaryDocValuesUpdates.getValue(bf) * 2);
}
}
}
r.close();
// Sometimes re-use RIW, other times open new one:
if (w != null && random().nextBoolean()) {
if (VERBOSE) {
System.out.println("TEST: close writer");
}
w.close();
w = null;
}
docBase += numDocs;
}
if (w != null) {
w.close();
}
// Final verify:
if (tragic == false) {
IndexReader r = DirectoryReader.open(dir);
assertEquals(docCount - deleteCount, r.numDocs());
r.close();
}
dir.close();
}
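The catch block in the middle of this test has to look one level down the cause chain because the injected FakeIOException can arrive wrapped, for example inside the AlreadyClosedException thrown once the writer closes itself. A small helper that walks the whole chain (a sketch, not part of the test) expresses the same intent more directly:

// Sketch: true if `type` occurs anywhere in the cause chain of t.
static boolean hasCause(Throwable t, Class<? extends Throwable> type) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
        if (type.isInstance(cur)) {
            return true;
        }
    }
    return false;
}
// The check above would then read: if (hasCause(t, FakeIOException.class)) { ... }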