Use of org.apache.geode.cache.CacheClosedException in project geode by apache.
The class ClientCQImpl, method close.
@Override
public void close(boolean sendRequestToServer) throws CqClosedException, CqException {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (isDebugEnabled) {
    logger.debug("Started closing CQ CqName: {} SendRequestToServer: {}", cqName, sendRequestToServer);
  }
  // Synchronize with stop and execute CQ commands
  synchronized (this.cqState) {
    // Check if the cq is already closed.
    if (this.isClosed()) {
      // throw new CqClosedException("CQ is already closed, CqName : " + this.cqName);
      if (isDebugEnabled) {
        logger.debug("CQ is already closed, CqName: {}", this.cqName);
      }
      return;
    }
    int stateBeforeClosing = this.cqState.getState();
    this.cqState.setState(CqStateImpl.CLOSING);
    boolean isClosed = false;
    // Client close. Proxy is null in case of server.
    // Check if this has been sent to the server; if so, send
    // the close request to the server.
    Exception exception = null;
    if (this.cqProxy != null && sendRequestToServer) {
      try {
        if (this.proxyCache != null) {
          if (this.proxyCache.isClosed()) {
            throw new CacheClosedException("Cache is closed for this user.");
          }
          UserAttributes.userAttributes.set(this.proxyCache.getUserAttributes());
        }
        cqProxy.close(this);
        isClosed = true;
      } catch (CancelException e) {
        throw e;
      } catch (Exception ex) {
        if (shutdownInProgress()) {
          return;
        }
        exception = ex;
      } finally {
        UserAttributes.userAttributes.set(null);
      }
    }
    // Clean up the resources used by the cq.
    this.removeFromCqMap();
    if (cqProxy == null || !sendRequestToServer || isClosed) {
      // Stat update.
      if (stateBeforeClosing == CqStateImpl.RUNNING) {
        cqService.stats().decCqsActive();
      } else if (stateBeforeClosing == CqStateImpl.STOPPED) {
        cqService.stats().decCqsStopped();
      }
      // Set the state to closed, and update stats.
      this.cqState.setState(CqStateImpl.CLOSED);
      cqService.stats().incCqsClosed();
      cqService.stats().decCqsOnClient();
      if (this.stats != null)
        this.stats.close();
    } else {
      if (shutdownInProgress()) {
        return;
      }
      // Wasn't able to send the close request to any server.
      if (exception != null) {
        throw new CqException(LocalizedStrings.CqQueryImpl_FAILED_TO_CLOSE_THE_CQ_CQNAME_0_ERROR_FROM_LAST_ENDPOINT_1.toLocalizedString(this.cqName, exception.getLocalizedMessage()), exception.getCause());
      } else {
        throw new CqException(LocalizedStrings.CqQueryImpl_FAILED_TO_CLOSE_THE_CQ_CQNAME_0_THE_SERVER_ENDPOINTS_ON_WHICH_THIS_CQ_WAS_REGISTERED_WERE_NOT_FOUND.toLocalizedString(this.cqName));
      }
    }
  }
  // Invoke close on the listeners, if any.
  if (this.cqAttributes != null) {
    CqListener[] cqListeners = this.getCqAttributes().getCqListeners();
    if (cqListeners != null) {
      if (isDebugEnabled) {
        logger.debug("Invoking CqListeners close() api for the CQ, CqName: {} Number of CqListeners: {}", cqName, cqListeners.length);
      }
      for (int lCnt = 0; lCnt < cqListeners.length; lCnt++) {
        try {
          cqListeners[lCnt].close();
          // Handle client side exceptions.
        } catch (Exception ex) {
          logger.warn(LocalizedMessage.create(LocalizedStrings.CqQueryImpl_EXCEPTION_OCCOURED_IN_THE_CQLISTENER_OF_THE_CQ_CQNAME_0_ERROR_1, new Object[] { cqName, ex.getLocalizedMessage() }));
          if (isDebugEnabled) {
            logger.debug(ex.getMessage(), ex);
          }
        } catch (VirtualMachineError err) {
          SystemFailure.initiateFailure(err);
          // If this ever returns, rethrow the error. We're poisoned
          // now, so don't let this thread continue.
          throw err;
        } catch (Throwable t) {
          // Whenever you catch Error or Throwable, you must also
          // catch VirtualMachineError (see above). However, there is
          // _still_ a possibility that you are dealing with a cascading
          // error condition, so you also need to check to see if the JVM
          // is still usable:
          SystemFailure.checkFailure();
          logger.warn(LocalizedMessage.create(LocalizedStrings.CqQueryImpl_RUNTIMEEXCEPTION_OCCOURED_IN_THE_CQLISTENER_OF_THE_CQ_CQNAME_0_ERROR_1, new Object[] { cqName, t.getLocalizedMessage() }));
          if (isDebugEnabled) {
            logger.debug(t.getMessage(), t);
          }
        }
      }
    }
  }
  if (isDebugEnabled) {
    logger.debug("Successfully closed the CQ. {}", cqName);
  }
}
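For context, a minimal client-side sketch of what drives this close path through the public API. This is an illustrative example, not code from the Geode sources; the locator address, region name, and query string are assumptions.

import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.CqAttributes;
import org.apache.geode.cache.query.CqAttributesFactory;
import org.apache.geode.cache.query.CqClosedException;
import org.apache.geode.cache.query.CqException;
import org.apache.geode.cache.query.CqExistsException;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.RegionNotFoundException;

public class ClientCqCloseExample {
  public static void main(String[] args) {
    // A subscription-enabled pool is required for CQ event delivery.
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334)  // assumed locator address
        .setPoolSubscriptionEnabled(true)
        .create();
    QueryService queryService = cache.getQueryService();
    CqAttributes cqAttributes = new CqAttributesFactory().create();
    try {
      CqQuery cq = queryService.newCq("exampleCq", "SELECT * FROM /exampleRegion", cqAttributes);
      cq.execute();
      // ... events are delivered to any registered CqListeners ...
      cq.close();  // ends up in ClientCQImpl.close(true) shown above
    } catch (CqException | CqExistsException | RegionNotFoundException e) {
      // Checked failures, e.g. the close request could not be delivered to any server.
      System.err.println("CQ operation failed: " + e.getMessage());
    } catch (CqClosedException | CacheClosedException e) {
      // Runtime failures when the CQ or the underlying cache is already closed.
      System.err.println("CQ or cache already closed: " + e.getMessage());
    } finally {
      cache.close();
    }
  }
}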
Use of org.apache.geode.cache.CacheClosedException in project geode by apache.
The class ClientCQImpl, method stop.
/**
 * Stop or pause executing the query.
 */
@Override
public void stop() throws CqClosedException, CqException {
  boolean isStopped = false;
  synchronized (this.cqState) {
    if (this.isClosed()) {
      throw new CqClosedException(LocalizedStrings.CqQueryImpl_CQ_IS_CLOSED_CQNAME_0.toLocalizedString(this.cqName));
    }
    if (!(this.isRunning())) {
      throw new IllegalStateException(LocalizedStrings.CqQueryImpl_CQ_IS_NOT_IN_RUNNING_STATE_STOP_CQ_DOES_NOT_APPLY_CQNAME_0.toLocalizedString(this.cqName));
    }
    Exception exception = null;
    try {
      if (this.proxyCache != null) {
        if (this.proxyCache.isClosed()) {
          throw new CacheClosedException("Cache is closed for this user.");
        }
        UserAttributes.userAttributes.set(this.proxyCache.getUserAttributes());
      }
      cqProxy.stop(this);
      isStopped = true;
    } catch (Exception e) {
      exception = e;
    } finally {
      UserAttributes.userAttributes.set(null);
    }
    if (cqProxy == null || isStopped) {
      // Change state and stats on the client side.
      this.cqState.setState(CqStateImpl.STOPPED);
      this.cqService.stats().incCqsStopped();
      this.cqService.stats().decCqsActive();
      if (logger.isDebugEnabled()) {
        logger.debug("Successfully stopped the CQ. {}", cqName);
      }
    } else {
      // Wasn't able to send the stop request to any server.
      if (exception != null) {
        throw new CqException(LocalizedStrings.CqQueryImpl_FAILED_TO_STOP_THE_CQ_CQNAME_0_ERROR_FROM_LAST_SERVER_1.toLocalizedString(this.cqName, exception.getLocalizedMessage()), exception.getCause());
      } else {
        throw new CqException(LocalizedStrings.CqQueryImpl_FAILED_TO_STOP_THE_CQ_CQNAME_0_THE_SERVER_ENDPOINTS_ON_WHICH_THIS_CQ_WAS_REGISTERED_WERE_NOT_FOUND.toLocalizedString(this.cqName));
      }
    }
  }
}
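A brief usage sketch of the stop/restart cycle this method supports; cq is assumed to be a CqQuery obtained as in the previous example.

try {
  cq.stop();     // the server keeps the CQ registered but stops delivering events
  // ... later, resume event delivery on the same CQ object ...
  cq.execute();
} catch (CqClosedException e) {
  // stop() or execute() was invoked on an already-closed CQ.
  System.err.println("CQ already closed: " + e.getMessage());
} catch (CqException | RegionNotFoundException e) {
  // For example, the stop request could not be delivered to any server.
  System.err.println("CQ operation failed: " + e.getMessage());
}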
Use of org.apache.geode.cache.CacheClosedException in project geode by apache.
The class LuceneQueryFunction, method getLuceneIndex.
private LuceneIndexImpl getLuceneIndex(final Region region, final LuceneFunctionContext<IndexResultCollector> searchContext) {
  LuceneService service = LuceneServiceProvider.get(region.getCache());
  LuceneIndexImpl index = null;
  try {
    index = (LuceneIndexImpl) service.getIndex(searchContext.getIndexName(), region.getFullPath());
    if (index == null) {
      // The index may still be in the "defined" state (creation in progress); wait until it
      // is either created or no longer defined, checking for cache shutdown along the way.
      while (service instanceof LuceneServiceImpl && (((LuceneServiceImpl) service).getDefinedIndex(searchContext.getIndexName(), region.getFullPath()) != null)) {
        try {
          Thread.sleep(10);
        } catch (InterruptedException e) {
          return null;
        }
        region.getCache().getCancelCriterion().checkCancelInProgress(null);
      }
      index = (LuceneIndexImpl) service.getIndex(searchContext.getIndexName(), region.getFullPath());
    }
  } catch (CacheClosedException e) {
    throw new InternalFunctionInvocationTargetException("Cache is closed when attempting to retrieve index:" + region.getFullPath(), e);
  }
  return index;
}
Use of org.apache.geode.cache.CacheClosedException in project geode by apache.
The class LuceneQueryFunction, method execute.
@Override
public void execute(FunctionContext context) {
  RegionFunctionContext ctx = (RegionFunctionContext) context;
  ResultSender<TopEntriesCollector> resultSender = ctx.getResultSender();
  Region region = ctx.getDataSet();
  LuceneFunctionContext<IndexResultCollector> searchContext = (LuceneFunctionContext) ctx.getArguments();
  if (searchContext == null) {
    throw new IllegalArgumentException("Missing search context");
  }
  LuceneQueryProvider queryProvider = searchContext.getQueryProvider();
  if (queryProvider == null) {
    throw new IllegalArgumentException("Missing query provider");
  }
  LuceneIndexImpl index = getLuceneIndex(region, searchContext);
  if (index == null) {
    throw new LuceneIndexNotFoundException(searchContext.getIndexName(), region.getFullPath());
  }
  RepositoryManager repoManager = index.getRepositoryManager();
  LuceneIndexStats stats = index.getIndexStats();
  Query query = getQuery(queryProvider, index);
  if (logger.isDebugEnabled()) {
    logger.debug("Executing lucene query: {}, on region {}", query, region.getFullPath());
  }
  int resultLimit = searchContext.getLimit();
  CollectorManager manager = (searchContext == null) ? null : searchContext.getCollectorManager();
  if (manager == null) {
    manager = new TopEntriesCollectorManager(null, resultLimit);
  }
  Collection<IndexResultCollector> results = new ArrayList<>();
  TopEntriesCollector mergedResult = null;
  try {
    long start = stats.startQuery();
    Collection<IndexRepository> repositories = null;
    try {
      repositories = repoManager.getRepositories(ctx);
      for (IndexRepository repo : repositories) {
        IndexResultCollector collector = manager.newCollector(repo.toString());
        if (logger.isDebugEnabled()) {
          logger.debug("Executing search on repo: " + repo.toString());
        }
        repo.query(query, resultLimit, collector);
        results.add(collector);
      }
      mergedResult = (TopEntriesCollector) manager.reduce(results);
    } finally {
      stats.endQuery(start, mergedResult == null ? 0 : mergedResult.size());
    }
    stats.incNumberOfQueryExecuted();
    resultSender.lastResult(mergedResult);
  } catch (IOException | BucketNotFoundException | CacheClosedException | PrimaryBucketException e) {
    logger.debug("Exception during lucene query function", e);
    throw new InternalFunctionInvocationTargetException(e);
  }
}
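For context, a hedged sketch of how application code typically reaches this function: a Lucene query created through LuceneService fans out to the data members, where it is executed by the LuceneQueryFunction.execute method shown above. The index name, region name, query string, and default field below are hypothetical.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.lucene.LuceneQuery;
import org.apache.geode.cache.lucene.LuceneQueryException;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;

public class LuceneQueryUsageSketch {
  public static void search(Cache cache) throws LuceneQueryException {
    LuceneService luceneService = LuceneServiceProvider.get(cache);
    LuceneQuery<Object, Object> query = luceneService.createLuceneQueryFactory()
        .setLimit(100)  // corresponds to searchContext.getLimit() in the function above
        .create("exampleIndex", "exampleRegion", "title:geode", "title");
    // findValues() runs the query against the index repositories on the data members.
    for (Object value : query.findValues()) {
      System.out.println(value);
    }
  }
}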
Use of org.apache.geode.cache.CacheClosedException in project geode by apache.
The class PersistPRKRFDUnitTest, method testCloseDiskStoreWhenPut.
/**
 * Do a put/modify/destroy while closing the disk store.
 *
 * To turn on debug, add the following parameter in local.conf: hydra.VmPrms-extraVMArgs +=
 * "-Ddisk.KRF_DEBUG=true";
 */
@Test
public void testCloseDiskStoreWhenPut() {
  final String title = "testCloseDiskStoreWhenPut:";
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  createPR(vm0, 0);
  createData(vm0, 0, 10, "a");
  vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      // let the region hold on to the put until the disk store is closed
      if (!DiskStoreImpl.KRF_DEBUG) {
        region.getAttributesMutator().setCacheWriter(new MyWriter());
      }
    }
  });
  // create test
  AsyncInvocation async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async create") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
      try {
        region.put(10, "b");
        fail("Expect CacheClosedException here");
      } catch (CacheClosedException cce) {
        System.out.println(title + cce.getMessage());
        if (DiskStoreImpl.KRF_DEBUG) {
          assert cce.getMessage().contains("The disk store is closed.");
        } else {
          assert cce.getMessage().contains("The disk store is closed");
        }
      } finally {
        expect.remove();
      }
    }
  });
  vm0.invoke(new CacheSerializableRunnable(title + "close disk store") {
    public void run2() throws CacheException {
      GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
      Wait.pause(500);
      gfc.closeDiskStores();
      synchronized (lockObject) {
        lockObject.notify();
      }
    }
  });
  ThreadUtils.join(async1, MAX_WAIT);
  closeCache(vm0);
  // update
  createPR(vm0, 0);
  vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      // let the region hold on to the put until the disk store is closed
      if (!DiskStoreImpl.KRF_DEBUG) {
        region.getAttributesMutator().setCacheWriter(new MyWriter());
      }
    }
  });
  async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async update") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
      try {
        region.put(1, "b");
        fail("Expect CacheClosedException here");
      } catch (CacheClosedException cce) {
        System.out.println(title + cce.getMessage());
        if (DiskStoreImpl.KRF_DEBUG) {
          assert cce.getMessage().contains("The disk store is closed.");
        } else {
          assert cce.getMessage().contains("The disk store is closed");
        }
      } finally {
        expect.remove();
      }
    }
  });
  vm0.invoke(new CacheSerializableRunnable(title + "close disk store") {
    public void run2() throws CacheException {
      GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
      Wait.pause(500);
      gfc.closeDiskStores();
      synchronized (lockObject) {
        lockObject.notify();
      }
    }
  });
  ThreadUtils.join(async1, MAX_WAIT);
  closeCache(vm0);
  // destroy
  createPR(vm0, 0);
  vm0.invoke(new CacheSerializableRunnable(title + "server add writer") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      // let the region hold on to the put until the disk store is closed
      if (!DiskStoreImpl.KRF_DEBUG) {
        region.getAttributesMutator().setCacheWriter(new MyWriter());
      }
    }
  });
  async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async destroy") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
      try {
        region.destroy(2, "b");
        fail("Expect CacheClosedException here");
      } catch (CacheClosedException cce) {
        System.out.println(title + cce.getMessage());
        if (DiskStoreImpl.KRF_DEBUG) {
          assert cce.getMessage().contains("The disk store is closed.");
        } else {
          assert cce.getMessage().contains("The disk store is closed");
        }
      } finally {
        expect.remove();
      }
    }
  });
  vm0.invoke(new CacheSerializableRunnable(title + "close disk store") {
    public void run2() throws CacheException {
      GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
      Wait.pause(500);
      gfc.closeDiskStores();
      synchronized (lockObject) {
        lockObject.notify();
      }
    }
  });
  ThreadUtils.join(async1, MAX_WAIT);
  checkData(vm0, 0, 10, "a");
  checkData(vm0, 10, 11, null);
  closeCache(vm0);
}
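MyWriter is referenced in the test but not included in this snippet. A plausible sketch of such a writer is shown below; it blocks the in-flight operation on lockObject until the "close disk store" runnable has closed the disk stores and called lockObject.notify(). This is an assumption about its behavior, not the actual class from PersistPRKRFDUnitTest, and it is written as a nested class of the test so that lockObject resolves.

// Hypothetical sketch of the blocking cache writer used by the test.
class MyWriter extends CacheWriterAdapter implements Serializable {
  @Override
  public void beforeCreate(EntryEvent event) {
    synchronized (lockObject) {
      try {
        // Hold the operation until the "close disk store" runnable calls
        // lockObject.notify() after gfc.closeDiskStores().
        lockObject.wait();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }

  @Override
  public void beforeUpdate(EntryEvent event) {
    beforeCreate(event);
  }

  @Override
  public void beforeDestroy(EntryEvent event) {
    beforeCreate(event);
  }
}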