Usage of org.apache.geode.cache.CacheClosedException in the Apache Geode project: class PersistentColocatedPartitionedRegionDUnitTest, method testRecoverySystemWithConcurrentPutter.
/**
 * Test what happens when we restart persistent members while there is an accessor concurrently
 * performing puts. This is for bug 43899
 */
@Test
public void testRecoverySystemWithConcurrentPutter() throws Throwable {
// Four VMs: vm0 is the accessor, vm1/vm2 host the persistent data, vm3 drives the backup.
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
VM vm3 = host.getVM(3);
// Define all of the runnables used in this test
// Runnable to create an accessor: localMaxMemory=0 means this member stores no bucket data.
// "region2" is colocated with the main PR.
SerializableRunnable createAccessor = new SerializableRunnable("createAccessor") {
public void run() {
Cache cache = getCache();
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
paf.setLocalMaxMemory(0);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PARTITION);
cache.createRegion(PR_REGION_NAME, af.create());
// The second region is colocated with the first.
paf.setColocatedWith(PR_REGION_NAME);
af.setPartitionAttributes(paf.create());
cache.createRegion("region2", af.create());
}
};
// Runnable to create the persistent PRs on a data-hosting member, reusing the
// "disk" disk store if it already exists in that VM.
SerializableRunnable createPRs = new SerializableRunnable("createPRs") {
public void run() {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion(PR_REGION_NAME, af.create());
paf.setColocatedWith(PR_REGION_NAME);
af.setPartitionAttributes(paf.create());
cache.createRegion("region2", af.create());
}
};
// runnable to close the cache.
SerializableRunnable closeCache = new SerializableRunnable("closeCache") {
public void run() {
closeCache();
}
};
// Runnable that loops over the bucket keys until the cache is closed, swallowing the
// exceptions that are expected while the data-hosting members are offline.
// NOTE(review): despite the name, this performs region.get(...), not put(...) —
// confirm whether gets are intentional for reproducing bug 43899.
SerializableRunnable doABunchOfPuts = new SerializableRunnable("doABunchOfPuts") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion(PR_REGION_NAME);
try {
for (int i = 0; ; i++) {
try {
region.get(i % NUM_BUCKETS);
} catch (PartitionOfflineException expected) {
// expected while a member hosting this bucket is offline; do nothing.
} catch (PartitionedRegionStorageException expected) {
// expected while no member can host the bucket; do nothing.
}
Thread.yield();
}
} catch (CacheClosedException expected) {
// ok, we're done.
}
}
};
// Runnable to clean up the disk dirs on a member.
SerializableRunnable cleanDiskDirs = new SerializableRunnable("Clean disk dirs") {
public void run() {
try {
cleanDiskDirs();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
// Create the PR on two members
vm1.invoke(createPRs);
vm2.invoke(createPRs);
// create the accessor.
vm0.invoke(createAccessor);
// Create some buckets.
createData(vm0, 0, NUM_BUCKETS, "a");
createData(vm0, 0, NUM_BUCKETS, "a", "region2");
// backup the system. We use this to get a snapshot of vm1 and vm2
// when they both are online. Recovering from this backup simulates
// a simultaneous kill and recovery.
backup(vm3);
// close vm1 and vm2.
vm1.invoke(closeCache);
vm2.invoke(closeCache);
// Wipe the current disk files, then restore the backup taken while both were online.
vm1.invoke(cleanDiskDirs);
vm2.invoke(cleanDiskDirs);
restoreBackup(2);
// in vm0, start doing a bunch of concurrent puts.
AsyncInvocation async0 = vm0.invokeAsync(doABunchOfPuts);
// This recovery should not hang (that's what we're testing for
// here.
AsyncInvocation async1 = vm1.invokeAsync(createPRs);
AsyncInvocation async2 = vm2.invokeAsync(createPRs);
async1.getResult(MAX_WAIT);
async2.getResult(MAX_WAIT);
// close the cache in vm0 to stop the async puts.
vm0.invoke(closeCache);
// make sure the accessor thread saw no unexpected exception.
async0.getResult(MAX_WAIT);
}
Usage of org.apache.geode.cache.CacheClosedException in the Apache Geode project: class PersistentPartitionedRegionDUnitTest, method testDiskConflictWithCoLocation.
// Verifies that restarting a member whose persisted colocated-region data conflicts with
// another member's newer data fails with ConflictingPersistentDataException (surfacing as a
// cache close) rather than silently merging the two histories.
@Test
public void testDiskConflictWithCoLocation() throws Exception {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
// Create a colocated parent/child PR pair in vm0 and persist keys 0-1 in each region.
createCoLocatedPR(vm0, 1, false);
// create some buckets
createData(vm0, 0, 2, "a");
createData(vm0, 0, 2, "a", PR_CHILD_REGION_NAME);
closePR(vm0, PR_CHILD_REGION_NAME);
closePR(vm0);
// With vm0 offline, create the same regions in vm1 and persist keys 2-3.
createCoLocatedPR(vm1, 1, false);
// create an overlapping bucket
createData(vm1, 2, 4, "a");
createData(vm1, 2, 4, "a", PR_CHILD_REGION_NAME);
// vm0's and vm1's disk stores now hold conflicting persistent views; restarting vm0 is
// expected to hit ConflictingPersistentDataException, which closes its cache — suppress
// both from the test log.
IgnoredException[] expectVm0 = { IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0), IgnoredException.addIgnoredException("CacheClosedException", vm0) };
try {
createCoLocatedPR(vm0, 1, true);
// Cache should have closed due to ConflictingPersistentDataException
vm0.invoke(() -> {
Awaitility.await().atMost(MAX_WAIT, TimeUnit.MILLISECONDS).until(() -> basicGetCache().isClosed());
basicGetCache().getCancelCriterion();
});
} catch (Exception ex) {
// The conflict may instead surface here, wrapped as a CacheClosedException whose cause is
// ConflictingPersistentDataException. Anything else is a genuine test failure — rethrow.
boolean expectedException = false;
if (ex.getCause() instanceof CacheClosedException) {
CacheClosedException cce = (CacheClosedException) ex.getCause();
if (cce.getCause() instanceof ConflictingPersistentDataException) {
expectedException = true;
}
}
if (!expectedException) {
throw ex;
}
} finally {
// Always deregister the ignored-exception patterns.
for (IgnoredException ie : expectVm0) {
ie.remove();
}
}
closePR(vm1, PR_CHILD_REGION_NAME);
closePR(vm1);
}
Usage of org.apache.geode.cache.CacheClosedException in the Apache Geode project: class PersistentRecoveryOrderDUnitTest, method testCloseDuringRegionOperation.
/**
 * Tests that closing the cache while puts are in flight neither hangs nor loses
 * acknowledged writes: after both members restart, the recovered value for each key must
 * be consistent with the last put that completed before the close.
 *
 * @throws Exception if a remote invocation fails
 */
@Test
public void testCloseDuringRegionOperation() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  createPersistentRegion(vm0);
  createPersistentRegion(vm1);
  // Try to make sure there are some operations in flight while closing the cache.
  // Each VM hammers its own key (0 in vm0, 1 in vm1) until the close interrupts it.
  AsyncInvocation asyncCreate0 = vm0.invokeAsync(putUntilCacheClosed(0));
  AsyncInvocation asyncCreate1 = vm1.invokeAsync(putUntilCacheClosed(1));
  Thread.sleep(500);
  AsyncInvocation close0 = closeCacheAsync(vm0);
  AsyncInvocation close1 = closeCacheAsync(vm1);
  // wait for the close to finish
  close0.getResult();
  close1.getResult();
  // Last value each putter successfully wrote before its cache closed.
  Integer lastSuccessfulInt0 = (Integer) asyncCreate0.getResult();
  Integer lastSuccessfulInt1 = (Integer) asyncCreate1.getResult();
  System.err.println("Cache was closed on 0->" + lastSuccessfulInt0 + ",1->" + lastSuccessfulInt1);
  // Restart both members and verify the recovered values match the last acknowledged puts.
  AsyncInvocation create1 = createPersistentRegionAsync(vm0);
  AsyncInvocation create2 = createPersistentRegionAsync(vm1);
  create1.getResult();
  create2.getResult();
  checkConcurrentCloseValue(vm0, vm1, 0, lastSuccessfulInt0);
  checkConcurrentCloseValue(vm0, vm1, 1, lastSuccessfulInt1);
}

/**
 * Builds a callable that repeatedly puts increasing values on {@code key} until the cache
 * or region is closed, then returns the last value that was successfully written (or -1 if
 * no put succeeded). Replaces two copy-pasted anonymous callables that differed only in
 * the key they wrote to.
 *
 * @param key the fixed region key to write to
 * @return a callable yielding the last successfully written value as an Integer
 */
private SerializableCallable putUntilCacheClosed(final int key) {
  return new SerializableCallable() {
    public Object call() {
      Cache cache = getCache();
      Region region = cache.getRegion(REGION_NAME);
      int i = 0;
      while (true) {
        try {
          region.put(key, i);
          i++;
        } catch (RegionDestroyedException | CacheClosedException e) {
          // The concurrent close interrupted us; i was not incremented for the failed put.
          break;
        }
      }
      // i - 1 is the last value whose put completed successfully.
      return i - 1;
    }
  };
}
Usage of org.apache.geode.cache.CacheClosedException in the Apache Geode project: class LuceneGetPageFunction, method execute.
/**
 * Fetches the page entries for the keys in the function filter from the local data of the
 * target region, and sends them back as a single last result. Cache-close and
 * bucket-movement failures are rethrown as a retryable function exception.
 */
@Override
public void execute(FunctionContext context) {
  try {
    RegionFunctionContext regionContext = (RegionFunctionContext) context;
    Region localData = PartitionRegionHelper.getLocalDataForContext(regionContext);
    Set<?> filterKeys = regionContext.getFilter();
    List<PageEntry> page = new PageResults(filterKeys.size());
    for (Object key : filterKeys) {
      PageEntry pageEntry = getEntry(localData, key);
      if (pageEntry == null) {
        // No entry for this key; skip it.
        continue;
      }
      page.add(pageEntry);
    }
    regionContext.getResultSender().lastResult(page);
  } catch (CacheClosedException | PrimaryBucketException e) {
    logger.debug("Exception during lucene query function", e);
    // Signal the caller that the function target moved/closed and the call may be retried.
    throw new InternalFunctionInvocationTargetException(e);
  }
}
Usage of org.apache.geode.cache.CacheClosedException in the Apache Geode project: class ClientCQImpl, method executeCqOnRedundantsAndPrimary.
/**
 * This executes the CQ first on the redundant server and then on the primary server. This is
 * required to keep the redundancy behavior in accordance with the HAQueue expectation (wherein
 * the events are delivered only from the primary).
 *
 * @param executeWithInitialResults boolean true to also fetch the query's initial result set
 * @return Object SelectResults in case of executeWithInitialResults, otherwise null
 * @throws CqClosedException if this CQ is already closed
 * @throws IllegalStateException if this CQ is already running
 * @throws CqException if the execute request fails on the server
 */
private Object executeCqOnRedundantsAndPrimary(boolean executeWithInitialResults) throws CqClosedException, RegionNotFoundException, CqException {
Object initialResults = null;
// State checks, the server request, and the transition to RUNNING all happen under the
// cqState lock so concurrent execute/close calls cannot interleave.
synchronized (this.cqState) {
if (this.isClosed()) {
throw new CqClosedException(LocalizedStrings.CqQueryImpl_CQ_IS_CLOSED_CQNAME_0.toLocalizedString(this.cqName));
}
if (this.isRunning()) {
throw new IllegalStateException(LocalizedStrings.CqQueryImpl_CQ_IS_IN_RUNNING_STATE_CQNAME_0.toLocalizedString(this.cqName));
}
if (logger.isDebugEnabled()) {
logger.debug("Performing Execute {} request for CQ. CqName: {}", ((executeWithInitialResults) ? "WithInitialResult" : ""), this.cqName);
}
this.cqBaseRegion = (LocalRegion) cqService.getCache().getRegion(this.regionName);
// If not server send the request to server.
if (!cqService.isServer()) {
// pool that initializes the CQ. Else its set using the Region proxy.
if (this.cqProxy == null) {
initConnectionProxy();
}
boolean success = false;
try {
// Multi-user (proxy cache) mode: install this user's credentials in the thread-local
// before issuing the server call; cleared again in the finally block below.
if (this.proxyCache != null) {
if (this.proxyCache.isClosed()) {
throw new CacheClosedException("Cache is closed for this user.");
}
UserAttributes.userAttributes.set(this.proxyCache.getUserAttributes());
}
if (executeWithInitialResults) {
initialResults = cqProxy.createWithIR(this);
// A null initial-result set means the server-side create failed.
if (initialResults == null) {
String errMsg = "Failed to execute the CQ. CqName: " + this.cqName + ", Query String is: " + this.queryString;
throw new CqException(errMsg);
}
} else {
cqProxy.create(this);
}
success = true;
} catch (Exception ex) {
// Check for system shutdown.
if (this.shutdownInProgress()) {
throw new CqException("System shutdown in progress.");
}
// Unwrap security failures so the caller sees the underlying cause.
if (ex.getCause() instanceof GemFireSecurityException) {
if (securityLogWriter.warningEnabled()) {
securityLogWriter.warning(LocalizedStrings.CqQueryImpl_EXCEPTION_WHILE_EXECUTING_CQ_EXCEPTION_0, ex, null);
}
throw new CqException(ex.getCause().getMessage(), ex.getCause());
} else if (ex instanceof CqException) {
throw (CqException) ex;
} else {
// Wrap anything else in a CqException carrying the server's error text.
String errMsg = LocalizedStrings.CqQueryImpl_FAILED_TO_EXECUTE_THE_CQ_CQNAME_0_QUERY_STRING_IS_1_ERROR_FROM_LAST_SERVER_2.toLocalizedString(this.cqName, this.queryString, ex.getLocalizedMessage());
if (logger.isDebugEnabled()) {
logger.debug(errMsg, ex);
}
throw new CqException(errMsg, ex);
}
} finally {
// On failure (and when not shutting down), undo the server-side create.
if (!success && !this.shutdownInProgress()) {
try {
cqProxy.close(this);
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("Exception cleaning up failed cq", e);
}
UserAttributes.userAttributes.set(null);
}
}
// Always clear the thread-local user attributes installed above.
UserAttributes.userAttributes.set(null);
}
}
this.cqState.setState(CqStateImpl.RUNNING);
}
// If client side, alert listeners that this CQ has been connected.
if (!cqService.isServer()) {
connected = true;
CqListener[] cqListeners = getCqAttributes().getCqListeners();
for (int lCnt = 0; lCnt < cqListeners.length; lCnt++) {
if (cqListeners[lCnt] != null) {
// Only CqStatusListener implementations receive connect notifications.
if (cqListeners[lCnt] instanceof CqStatusListener) {
CqStatusListener listener = (CqStatusListener) cqListeners[lCnt];
listener.onCqConnected();
}
}
}
}
// Update the CQ statistics for book keeping.
this.cqService.stats().incCqsActive();
this.cqService.stats().decCqsStopped();
return initialResults;
}
Aggregations