Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class TestReaderClosed, method testReaderChaining:
// LUCENE-3800
public void testReaderChaining() throws Exception {
  assertTrue(reader.getRefCount() > 0);
  LeafReader wrappedReader = new ParallelLeafReader(getOnlyLeafReader(reader));
  // We wrap with an OwnCacheKeyMultiReader so that closing the underlying reader
  // does not terminate the threadpool (if that index searcher uses one)
  IndexSearcher searcher = newSearcher(new OwnCacheKeyMultiReader(wrappedReader));
  TermRangeQuery query = TermRangeQuery.newStringRange("field", "a", "z", true, true);
  searcher.search(query, 5);
  // close original child reader
  reader.close();
  try {
    searcher.search(query, 5);
  } catch (Exception e) {
    AlreadyClosedException ace = null;
    for (Throwable t = e; t != null; t = t.getCause()) {
      if (t instanceof AlreadyClosedException) {
        ace = (AlreadyClosedException) t;
      }
    }
    if (ace == null) {
      throw new AssertionError("Query failed, but not due to an AlreadyClosedException", e);
    }
    assertEquals("this IndexReader cannot be used anymore as one of its child readers was closed", ace.getMessage());
  } finally {
    // close executor: in case of wrap-wrap-wrapping
    searcher.getIndexReader().close();
  }
}
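The catch block above walks the exception's cause chain because the AlreadyClosedException may arrive wrapped, for example by the searcher's executor. A minimal standalone sketch of that unwrapping as a reusable helper (the Causes class and findAlreadyClosed name are illustrative, not part of Lucene):

import org.apache.lucene.store.AlreadyClosedException;

final class Causes {
  // Walks the cause chain and returns the innermost AlreadyClosedException,
  // or null if the failure was caused by something else entirely.
  static AlreadyClosedException findAlreadyClosed(Throwable e) {
    AlreadyClosedException ace = null;
    for (Throwable t = e; t != null; t = t.getCause()) {
      if (t instanceof AlreadyClosedException) {
        ace = (AlreadyClosedException) t;
      }
    }
    return ace;
  }
}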
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class TestReaderClosed, method test:
public void test() throws Exception {
  assertTrue(reader.getRefCount() > 0);
  IndexSearcher searcher = newSearcher(reader);
  TermRangeQuery query = TermRangeQuery.newStringRange("field", "a", "z", true, true);
  searcher.search(query, 5);
  reader.close();
  try {
    searcher.search(query, 5);
  } catch (AlreadyClosedException ace) {
    // expected
  } catch (RejectedExecutionException ree) {
    // expected if the searcher was created with threads, since LuceneTestCase
    // closes the thread pool in a reader close listener
  }
}
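Both tests start by asserting reader.getRefCount() > 0: Lucene readers are reference counted, and a reader only truly releases its resources once the count reaches zero. If a searcher must stay usable while another owner may close the reader, the usual remedy is to hold an extra reference for the duration of the query. A minimal sketch of that pattern (the SafeSearch class is hypothetical):

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;

final class SafeSearch {
  // Holds an extra reference for the duration of the search so a concurrent
  // close() elsewhere cannot invalidate the reader mid-query. incRef() itself
  // throws AlreadyClosedException if the reader is already gone.
  static TopDocs search(DirectoryReader reader, Query query, int n) throws IOException {
    reader.incRef();
    try {
      return new IndexSearcher(reader).search(query, n);
    } finally {
      reader.decRef();  // the reader only really closes when the count hits zero
    }
  }
}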
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class SpellChecker, method swapSearcher:
private void swapSearcher(final Directory dir) throws IOException {
  /*
   * Opening a searcher is possibly very expensive.
   * We'd rather close it again, if the SpellChecker was closed during
   * this operation, than block access to the current searcher while opening.
   */
  final IndexSearcher indexSearcher = createSearcher(dir);
  synchronized (searcherLock) {
    if (closed) {
      indexSearcher.getIndexReader().close();
      throw new AlreadyClosedException("Spellchecker has been closed");
    }
    if (searcher != null) {
      searcher.getIndexReader().close();
    }
    // set the spellIndex inside the sync block to ensure consistency
    searcher = indexSearcher;
    this.spellIndex = dir;
  }
}
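This guard only works because close() flips the closed flag and tears down the current searcher under the same searcherLock. A minimal sketch of the matching close path (an illustration of the pattern, not the actual SpellChecker source):

public void close() throws IOException {
  synchronized (searcherLock) {
    if (closed) {
      throw new AlreadyClosedException("Spellchecker has been closed");
    }
    closed = true;
    // Close the current searcher's reader; any swapSearcher() that lost the
    // race will see closed == true and clean up its own searcher instead.
    if (searcher != null) {
      searcher.getIndexReader().close();
    }
    searcher = null;
  }
}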
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class TestIndexWriterOnDiskFull, method testAddDocumentOnDiskFull:
/*
 * Make sure IndexWriter cleans up on hitting a disk
 * full exception in addDocument.
 * TODO: how to do this on windows with FSDirectory?
 */
public void testAddDocumentOnDiskFull() throws IOException {
  for (int pass = 0; pass < 2; pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    boolean doAbort = pass == 1;
    long diskFree = TestUtil.nextInt(random(), 100, 300);
    boolean indexExists = false;
    while (true) {
      if (VERBOSE) {
        System.out.println("TEST: cycle: diskFree=" + diskFree);
      }
      MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), new RAMDirectory());
      dir.setMaxSizeInBytes(diskFree);
      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
      MergeScheduler ms = writer.getConfig().getMergeScheduler();
      if (ms instanceof ConcurrentMergeScheduler) {
        // This test intentionally produces exceptions
        // in the threads that CMS launches; we don't
        // want to pollute test output with these.
        ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
      }
      boolean hitError = false;
      try {
        for (int i = 0; i < 200; i++) {
          addDoc(writer);
        }
        if (VERBOSE) {
          System.out.println("TEST: done adding docs; now commit");
        }
        writer.commit();
        indexExists = true;
      } catch (IOException e) {
        if (VERBOSE) {
          System.out.println("TEST: exception on addDoc");
          e.printStackTrace(System.out);
        }
        hitError = true;
      }
      if (hitError) {
        if (doAbort) {
          if (VERBOSE) {
            System.out.println("TEST: now rollback");
          }
          writer.rollback();
        } else {
          try {
            if (VERBOSE) {
              System.out.println("TEST: now close");
            }
            writer.close();
          } catch (IOException e) {
            if (VERBOSE) {
              System.out.println("TEST: exception on close; retry w/ no disk space limit");
              e.printStackTrace(System.out);
            }
            dir.setMaxSizeInBytes(0);
            try {
              writer.close();
            } catch (AlreadyClosedException ace) {
              // OK
            }
          }
        }
        if (indexExists) {
          // Make sure reader can open the index:
          DirectoryReader.open(dir).close();
        }
        dir.close();
        // Now try again w/ more space:
        diskFree += TEST_NIGHTLY ? TestUtil.nextInt(random(), 400, 600) : TestUtil.nextInt(random(), 3000, 5000);
      } else {
        //_TestUtil.syncConcurrentMerges(writer);
        dir.setMaxSizeInBytes(0);
        writer.close();
        dir.close();
        break;
      }
    }
  }
}
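The inner retry captures the contract under test: after a failed close(), the writer may already be closed, so a second close() is allowed to throw AlreadyClosedException. The same contract is easy to see in isolation; a minimal standalone sketch (directory and analyzer choices here are illustrative, not taken from the test):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RAMDirectory;

public class WriterClosedDemo {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
    writer.rollback();  // closes the writer and discards all pending changes
    try {
      writer.addDocument(new Document());  // any further use must fail
    } catch (AlreadyClosedException expected) {
      System.out.println("writer is closed: " + expected.getMessage());
    }
    dir.close();
  }
}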
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
Class TestIndexWriterMerging, method testNoWaitClose:
@Slow
public void testNoWaitClose() throws Throwable {
  Directory directory = newDirectory();
  final Document doc = new Document();
  FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
  customType.setTokenized(false);
  Field idField = newField("id", "", customType);
  doc.add(idField);
  for (int pass = 0; pass < 2; pass++) {
    if (VERBOSE) {
      System.out.println("TEST: pass=" + pass);
    }
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
        .setOpenMode(OpenMode.CREATE)
        .setMaxBufferedDocs(2)
        .setMergePolicy(newLogMergePolicy())
        .setCommitOnClose(false);
    if (pass == 2) {
      conf.setMergeScheduler(new SerialMergeScheduler());
    }
    IndexWriter writer = new IndexWriter(directory, conf);
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
    for (int iter = 0; iter < 10; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }
      for (int j = 0; j < 199; j++) {
        idField.setStringValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }
      int delID = iter * 199;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }
      writer.commit();
      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
      final IndexWriter finalWriter = writer;
      final AtomicReference<Throwable> failure = new AtomicReference<>();
      Thread t1 = new Thread() {
        @Override
        public void run() {
          boolean done = false;
          while (!done) {
            for (int i = 0; i < 100; i++) {
              try {
                finalWriter.addDocument(doc);
              } catch (AlreadyClosedException e) {
                done = true;
                break;
              } catch (NullPointerException e) {
                done = true;
                break;
              } catch (Throwable e) {
                e.printStackTrace(System.out);
                failure.set(e);
                done = true;
                break;
              }
            }
            Thread.yield();
          }
        }
      };
      t1.start();
      writer.close();
      t1.join();
      if (failure.get() != null) {
        throw failure.get();
      }
      // Make sure reader can read
      IndexReader reader = DirectoryReader.open(directory);
      reader.close();
      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random()))
          .setOpenMode(OpenMode.APPEND)
          .setMergePolicy(newLogMergePolicy())
          .setCommitOnClose(false));
    }
    writer.close();
  }
  directory.close();
}
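The essence of the race this test drives is that an indexing thread can treat AlreadyClosedException as its shutdown signal when another thread calls close(). A stripped-down sketch of that shape (directory and analyzer choices are illustrative, not taken from the test):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RAMDirectory;

public class CloseRaceDemo {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
    Thread indexer = new Thread(() -> {
      try {
        while (true) {
          writer.addDocument(new Document());  // loops until close() wins the race
        }
      } catch (AlreadyClosedException e) {
        // expected: the writer was closed out from under us
      } catch (Exception e) {
        e.printStackTrace();  // anything else is a real failure
      }
    });
    indexer.start();
    writer.close();  // the indexer thread exits via the catch above
    indexer.join();
    dir.close();
  }
}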