Use of org.apache.lucene.store.AlreadyClosedException in project elasticsearch by elastic.
In class SearchServiceTests, method testSearchWhileIndexDeleted:
public void testSearchWhileIndexDeleted() throws IOException, InterruptedException {
    createIndex("index");
    client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
    SearchService service = getInstanceFromNode(SearchService.class);
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
    IndexShard indexShard = indexService.getShard(0);
    AtomicBoolean running = new AtomicBoolean(true);
    CountDownLatch startGun = new CountDownLatch(1);
    Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
    final Thread thread = new Thread() {
        @Override
        public void run() {
            startGun.countDown();
            while (running.get()) {
                service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED);
                if (randomBoolean()) {
                    // trigger concurrent indexing/refreshes so index readers go out of scope;
                    // this surfaces AlreadyClosedException if a search context is accessed
                    // in a non-sane way.
                    try {
                        semaphore.acquire();
                    } catch (InterruptedException e) {
                        throw new AssertionError(e);
                    }
                    client().prepareIndex("index", "type")
                        .setSource("field", "value")
                        .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values()))
                        .execute(new ActionListener<IndexResponse>() {
                            @Override
                            public void onResponse(IndexResponse indexResponse) {
                                semaphore.release();
                            }

                            @Override
                            public void onFailure(Exception e) {
                                semaphore.release();
                            }
                        });
                }
            }
        }
    };
    thread.start();
    startGun.await();
    try {
        final int rounds = scaledRandomIntBetween(100, 10000);
        for (int i = 0; i < rounds; i++) {
            try {
                QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase(
                    new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(),
                        new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f),
                    new SearchTask(123L, "", "", "", null));
                IntArrayList intCursors = new IntArrayList(1);
                intCursors.add(0);
                ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null);
                service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null));
            } catch (AlreadyClosedException ex) {
                throw ex;
            } catch (IllegalStateException ex) {
                assertEquals("search context is already closed can't increment refCount current count [0]", ex.getMessage());
            } catch (SearchContextMissingException ex) {
                // that's fine
            }
        }
    } finally {
        running.set(false);
        thread.join();
        semaphore.acquire(Integer.MAX_VALUE);
    }
}
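The IllegalStateException message asserted above comes from a ref-counting guard on the search context: once the count drops to zero the context is closed, and any further incRef must fail rather than resurrect it. A minimal sketch of that guard in plain Java (the class below is illustrative, not Elasticsearch's actual implementation):

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical sketch of a ref-counted context that refuses new references
// once fully released, mirroring the error message the test expects.
class RefCountedContext {
    private final AtomicInteger refCount = new AtomicInteger(1);

    void incRef() {
        while (true) {
            int count = refCount.get();
            if (count <= 0) {
                throw new IllegalStateException(
                    "search context is already closed can't increment refCount current count [" + count + "]");
            }
            if (refCount.compareAndSet(count, count + 1)) {
                return;
            }
        }
    }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            close(); // final reference released: free the underlying resources
        }
    }

    private void close() {
        // a real context would release its Lucene readers here; any later use of
        // those readers would surface as AlreadyClosedException
    }
}

Note how the test rethrows AlreadyClosedException because a query must never observe a closed Lucene reader, while the IllegalStateException and SearchContextMissingException paths are the expected, benign outcomes of the race.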
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
In class CachingDirectoryFactory, method get:
/*
 * (non-Javadoc)
 *
 * @see org.apache.solr.core.DirectoryFactory#get(java.lang.String,
 * java.lang.String, boolean)
 */
@Override
public final Directory get(String path, DirContext dirContext, String rawLockType) throws IOException {
    String fullPath = normalize(path);
    synchronized (this) {
        if (closed) {
            throw new AlreadyClosedException("Already closed");
        }
        final CacheValue cacheValue = byPathCache.get(fullPath);
        Directory directory = null;
        if (cacheValue != null) {
            directory = cacheValue.directory;
        }
        if (directory == null) {
            directory = create(fullPath, createLockFactory(rawLockType), dirContext);
            assert ObjectReleaseTracker.track(directory);
            boolean success = false;
            try {
                CacheValue newCacheValue = new CacheValue(fullPath, directory);
                byDirectoryCache.put(directory, newCacheValue);
                byPathCache.put(fullPath, newCacheValue);
                log.debug("return new directory for {}", fullPath);
                success = true;
            } finally {
                if (!success) {
                    IOUtils.closeWhileHandlingException(directory);
                }
            }
        } else {
            cacheValue.refCnt++;
            log.debug("Reusing cached directory: {}", cacheValue);
        }
        return directory;
    }
}
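Because each successful get() either creates a cache entry or bumps its refCnt, callers must balance every get() with a release so the factory can eventually close the Directory. A hedged usage sketch, assuming Solr's DirectoryFactory.release(Directory) counterpart (countFiles is a made-up caller, not Solr code):

import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.solr.core.DirectoryFactory;
import org.apache.solr.core.DirectoryFactory.DirContext;

// Hedged usage sketch: pair every get() with a release(), otherwise the cached
// entry's refCnt never returns to zero and the Directory is leaked.
class DirectoryUser {
    int countFiles(DirectoryFactory factory, String path, String lockType) throws IOException {
        Directory dir = factory.get(path, DirContext.DEFAULT, lockType);
        try {
            return dir.listAll().length;
        } finally {
            factory.release(dir); // decrements refCnt; the factory closes the Directory at zero
        }
    }
}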
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
In class MetricsMap, method getMBeanInfo:
@Override
public MBeanInfo getMBeanInfo() {
    ArrayList<MBeanAttributeInfo> attrInfoList = new ArrayList<>();
    Map<String, Object> stats = getValue(true);
    if (useCachedStatsBetweenGetMBeanInfoCalls) {
        cachedValue = stats;
    }
    try {
        stats.forEach((k, v) -> {
            Class type = v.getClass();
            OpenType typeBox = determineType(type);
            if (type.equals(String.class) || typeBox == null) {
                attrInfoList.add(new MBeanAttributeInfo(k, String.class.getName(), null, true, false, false));
            } else {
                attrInfoList.add(new OpenMBeanAttributeInfoSupport(k, k, typeBox, true, false, false));
            }
        });
    } catch (Exception e) {
        // don't log the issue if the core is already closing
        if (!(SolrException.getRootCause(e) instanceof AlreadyClosedException)) {
            log.warn("Could not get attributes of MetricsMap: {}", this, e);
        }
    }
    MBeanAttributeInfo[] attrInfoArr = attrInfoList.toArray(new MBeanAttributeInfo[attrInfoList.size()]);
    return new MBeanInfo(getClass().getName(), "MetricsMap", attrInfoArr, null, null, null);
}
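determineType is not shown in this snippet; presumably it maps common value classes to JMX OpenType instances and returns null for anything unrepresentable, which is why the code above falls back to exposing the attribute as a String. A hypothetical version of such a helper (the class and method are assumptions, not Solr's actual implementation):

import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;

// Hypothetical sketch: map common boxed types to their JMX SimpleType and
// return null for anything we cannot represent as an open type.
final class OpenTypeMapper {
    static OpenType<?> determineType(Class<?> type) {
        if (type.equals(Integer.class)) return SimpleType.INTEGER;
        if (type.equals(Long.class))    return SimpleType.LONG;
        if (type.equals(Double.class))  return SimpleType.DOUBLE;
        if (type.equals(Float.class))   return SimpleType.FLOAT;
        if (type.equals(Boolean.class)) return SimpleType.BOOLEAN;
        return null; // unknown type: the caller exposes the attribute as a String
    }
}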
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
In class TestIndexFileDeleter, method testExcInDecRef:
// LUCENE-5919
public void testExcInDecRef() throws Throwable {
    MockDirectoryWrapper dir = newMockDirectory();
    // disable slow things: we don't rely upon sleeps here.
    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    dir.setUseSlowOpenClosers(false);
    final AtomicBoolean doFailExc = new AtomicBoolean();
    dir.failOn(new MockDirectoryWrapper.Failure() {
        @Override
        public void eval(MockDirectoryWrapper dir) throws IOException {
            if (doFailExc.get() && random().nextInt(4) == 1) {
                Exception e = new Exception();
                StackTraceElement[] stack = e.getStackTrace();
                for (int i = 0; i < stack.length; i++) {
                    if (stack[i].getClassName().equals(IndexFileDeleter.class.getName())
                        && stack[i].getMethodName().equals("decRef")) {
                        throw new RuntimeException("fake fail");
                    }
                }
            }
        }
    });
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    //iwc.setMergeScheduler(new SerialMergeScheduler());
    MergeScheduler ms = iwc.getMergeScheduler();
    if (ms instanceof ConcurrentMergeScheduler) {
        final ConcurrentMergeScheduler suppressFakeFail = new ConcurrentMergeScheduler() {
            @Override
            protected void handleMergeException(Directory dir, Throwable exc) {
                // suppress only our injected "fake fail" exceptions:
                if (exc instanceof RuntimeException && exc.getMessage().equals("fake fail")) {
                    // ok to ignore
                } else if ((exc instanceof AlreadyClosedException || exc instanceof IllegalStateException)
                           && exc.getCause() != null && "fake fail".equals(exc.getCause().getMessage())) {
                    // also ok to ignore
                } else {
                    super.handleMergeException(dir, exc);
                }
            }
        };
        final ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) ms;
        suppressFakeFail.setMaxMergesAndThreads(cms.getMaxMergeCount(), cms.getMaxThreadCount());
        iwc.setMergeScheduler(suppressFakeFail);
    }
    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
    // Since we hit exc during merging, a partial
    // forceMerge can easily return when there are still
    // too many segments in the index:
    w.setDoRandomForceMergeAssert(false);
    doFailExc.set(true);
    int ITERS = atLeast(1000);
    for (int iter = 0; iter < ITERS; iter++) {
        try {
            if (random().nextInt(10) == 5) {
                w.commit();
            } else if (random().nextInt(10) == 7) {
                w.getReader().close();
            } else {
                Document doc = new Document();
                doc.add(newTextField("field", "some text", Field.Store.NO));
                w.addDocument(doc);
            }
        } catch (Throwable t) {
            if (t.toString().contains("fake fail")
                || (t.getCause() != null && t.getCause().toString().contains("fake fail"))) {
                // ok
            } else {
                throw t;
            }
        }
    }
    doFailExc.set(false);
    w.close();
    dir.close();
}
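The Failure hook above relies on a common fault-injection trick: allocate a throwaway Exception purely to capture the current stack, then throw only when a specific class and method appear on it, so the fault fires exactly inside IndexFileDeleter.decRef. The same trick in a standalone, reusable form (all names here are illustrative):

// Minimal standalone version of the stack-inspection trick used above:
// capture the current stack via a throwaway exception and inject a failure
// only when a specific class/method is on it.
final class StackTargetedFailure {
    private final String className;
    private final String methodName;

    StackTargetedFailure(String className, String methodName) {
        this.className = className;
        this.methodName = methodName;
    }

    void maybeFail() {
        for (StackTraceElement frame : new Exception().getStackTrace()) {
            if (frame.getClassName().equals(className) && frame.getMethodName().equals(methodName)) {
                throw new RuntimeException("fake fail");
            }
        }
    }
}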
Use of org.apache.lucene.store.AlreadyClosedException in project lucene-solr by apache.
In class TestConcurrentMergeScheduler, method testFlushExceptions:
// Make sure running BG merges still work fine even when
// we are hitting exceptions during flushing.
public void testFlushExceptions() throws IOException {
    MockDirectoryWrapper directory = newMockDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.failOn(failure);
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2);
    if (iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
        iwc.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
            @Override
            protected boolean isOK(Throwable th) {
                return th instanceof AlreadyClosedException
                    || (th instanceof IllegalStateException && th.getMessage().contains("this writer hit an unrecoverable error"));
            }
        });
    }
    IndexWriter writer = new IndexWriter(directory, iwc);
    Document doc = new Document();
    Field idField = newStringField("id", "", Field.Store.YES);
    doc.add(idField);
    outer: for (int i = 0; i < 10; i++) {
        if (VERBOSE) {
            System.out.println("TEST: iter=" + i);
        }
        for (int j = 0; j < 20; j++) {
            idField.setStringValue(Integer.toString(i * 20 + j));
            writer.addDocument(doc);
        }
        // keep cycling: a concurrent merge can flush the doc we just added, so
        // there is nothing left to flush, and we don't hit the exception
        while (true) {
            writer.addDocument(doc);
            failure.setDoFail();
            try {
                writer.flush(true, true);
                if (failure.hitExc) {
                    fail("failed to hit IOException");
                }
            } catch (IOException ioe) {
                if (VERBOSE) {
                    ioe.printStackTrace(System.out);
                }
                failure.clearDoFail();
                assertTrue(writer.isClosed());
                // Abort should have closed the deleter:
                assertTrue(writer.deleter.isClosed());
                break outer;
            }
        }
    }
    assertFalse(DirectoryReader.indexExists(directory));
    directory.close();
}
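Once the injected flush IOException aborts the writer, IndexWriter closes itself, which is why the test asserts writer.isClosed() and why the suppressing merge scheduler tolerates AlreadyClosedException. A hedged sketch of how defensive caller code might react to that state (the stop-and-reopen policy below is illustrative, not part of the test):

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.AlreadyClosedException;

// Hedged sketch: after a tragic (aborting) exception the writer is closed and
// every subsequent call throws AlreadyClosedException; callers indexing from
// other threads should treat that as "stop and rebuild" rather than retry.
final class WriterGuard {
    boolean tryAdd(IndexWriter writer, Document doc) throws IOException {
        try {
            writer.addDocument(doc);
            return true;
        } catch (AlreadyClosedException ace) {
            // the writer was closed underneath us (e.g. a tragic merge/flush error);
            // signal the caller to reopen the IndexWriter instead of retrying blindly
            return false;
        }
    }
}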