Use of org.apache.geode.test.dunit.IgnoredException in project geode by Apache: class PersistPRKRFDUnitTest, method testCloseDiskStoreWhenPut.
/**
 * Does a put (create), a put (update), and a destroy while the disk store is being closed, and
 * verifies that each operation fails with a CacheClosedException whose message reports that the
 * disk store is closed. After all three attempts, the original data must be intact.
 *
 * to turn on debug, add following parameter in local.conf: hydra.VmPrms-extraVMArgs +=
 * "-Ddisk.KRF_DEBUG=true";
 */
@Test
public void testCloseDiskStoreWhenPut() {
  final String title = "testCloseDiskStoreWhenPut:";
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);

  // create
  createPR(vm0, 0);
  createData(vm0, 0, 10, "a");
  installBlockingWriter(vm0, title);
  AsyncInvocation async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async create") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
      try {
        region.put(10, "b");
        fail("Expect CacheClosedException here");
      } catch (CacheClosedException cce) {
        verifyDiskStoreClosedMessage(title, cce);
      } finally {
        expect.remove();
      }
    }
  });
  closeDiskStoresAndNotify(vm0, title);
  ThreadUtils.join(async1, MAX_WAIT);
  closeCache(vm0);

  // update
  createPR(vm0, 0);
  installBlockingWriter(vm0, title);
  async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async update") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
      try {
        region.put(1, "b");
        fail("Expect CacheClosedException here");
      } catch (CacheClosedException cce) {
        verifyDiskStoreClosedMessage(title, cce);
      } finally {
        expect.remove();
      }
    }
  });
  closeDiskStoresAndNotify(vm0, title);
  ThreadUtils.join(async1, MAX_WAIT);
  closeCache(vm0);

  // destroy
  createPR(vm0, 0);
  installBlockingWriter(vm0, title);
  async1 = vm0.invokeAsync(new CacheSerializableRunnable(title + "async destroy") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
      try {
        region.destroy(2, "b");
        fail("Expect CacheClosedException here");
      } catch (CacheClosedException cce) {
        verifyDiskStoreClosedMessage(title, cce);
      } finally {
        expect.remove();
      }
    }
  });
  closeDiskStoresAndNotify(vm0, title);
  ThreadUtils.join(async1, MAX_WAIT);

  // None of the three operations completed, so the pre-existing data must be unchanged
  // and no new key must have been created.
  checkData(vm0, 0, 10, "a");
  checkData(vm0, 10, 11, null);
  closeCache(vm0);
}

/**
 * Installs MyWriter on the PR so the pending operation blocks until the disk store is closed.
 * Skipped in KRF_DEBUG mode, where the disk-store debug hook itself provides the hold point.
 */
private void installBlockingWriter(VM vm, final String title) {
  vm.invoke(new CacheSerializableRunnable(title + "server add writer") {
    public void run2() throws CacheException {
      Region region = getRootRegion(PR_REGION_NAME);
      // let the region to hold on the put until diskstore is closed
      if (!DiskStoreImpl.KRF_DEBUG) {
        region.getAttributesMutator().setCacheWriter(new MyWriter());
      }
    }
  });
}

/**
 * Closes all disk stores in the given VM and then releases the writer blocked on lockObject.
 */
private void closeDiskStoresAndNotify(VM vm, final String title) {
  vm.invoke(new CacheSerializableRunnable(title + "close disk store") {
    public void run2() throws CacheException {
      GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
      // give the async operation a chance to reach the writer before closing the disk stores
      Wait.pause(500);
      gfc.closeDiskStores();
      synchronized (lockObject) {
        lockObject.notify();
      }
    }
  });
}

/**
 * Asserts the CacheClosedException message indicates the disk store was closed. BUGFIX: the
 * original used the Java {@code assert} keyword, which is silently skipped unless the JVM runs
 * with -ea; use fail() so the check always executes.
 */
private static void verifyDiskStoreClosedMessage(String title, CacheClosedException cce) {
  System.out.println(title + cce.getMessage());
  // In KRF_DEBUG mode the message is expected to include the trailing period.
  String required = DiskStoreImpl.KRF_DEBUG
      ? "The disk store is closed."
      : "The disk store is closed";
  if (!cce.getMessage().contains(required)) {
    fail("expected message to contain \"" + required + "\" but was: " + cce.getMessage());
  }
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by Apache: class PersistentPartitionedRegionDUnitTest, method testCleanupAfterConflict.
@Test
public void testCleanupAfterConflict() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);

  // Seed vm0 with a couple of buckets, then take it offline.
  createPR(vm0, 0);
  createData(vm0, 0, 2, "a");
  closePR(vm0);

  // Bring up vm1 and write into a bucket that overlaps vm0's persisted data.
  createPR(vm1, 0);
  createData(vm1, 1, 2, "a");

  IgnoredException[] ignoredInVm0 = {
      IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0),
      IgnoredException.addIgnoredException("CacheClosedException", vm0) };
  try {
    // This results in ConflictingPersistentDataException. As part of
    // GEODE-2918, the cache is closed, when ConflictingPersistentDataException
    // is encountered.
    createPR(vm0, 0);
    fail("should have seen a conflicting data exception");
  } catch (Exception ex) {
    Throwable cause = ex.getCause();
    boolean sawExpectedCause = cause instanceof CacheClosedException
        && cause.getCause() instanceof ConflictingPersistentDataException;
    if (!sawExpectedCause) {
      throw ex;
    }
  } finally {
    for (IgnoredException ie : ignoredInVm0) {
      ie.remove();
    }
  }

  IgnoredException ignoredInVm1 =
      IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
  try {
    createData(vm1, 0, 1, "a");
  } catch (Exception e) {
    // restart.
    if (!(e.getCause() instanceof PartitionOfflineException)) {
      throw e;
    }
  } finally {
    ignoredInVm1.remove();
  }
  closePR(vm1);

  // This should succeed, vm0 should not have persisted any view
  // information from vm1
  createPR(vm0, 0);
  checkData(vm0, 0, 2, "a");
  checkData(vm0, 2, 3, null);
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by Apache: class PersistentPartitionedRegionDUnitTest, method testRevokeBeforeStartup.
// GEODE-974: async actions, time sensitive, 65 second timeouts
/**
 * Verifies that revoking all members fails while members are running, and that after vm1's disk
 * directory is legitimately revoked, vm0 recovers its buckets and vm1 is refused startup with a
 * RevokedPersistentDataException.
 */
@Category(FlakyTest.class)
@Test
public void testRevokeBeforeStartup() throws Throwable {
  IgnoredException.addIgnoredException("RevokeFailedException");
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  int numBuckets = 50;

  createPR(vm0, 1);
  createPR(vm1, 1);
  createData(vm0, 0, numBuckets, "a");
  Set<Integer> vm0Buckets = getBucketList(vm0);
  Set<Integer> vm1Buckets = getBucketList(vm1);
  assertEquals(vm0Buckets, vm1Buckets);

  // This should fail with a revocation failed message
  try {
    revokeAllMembers(vm2);
    fail("The revoke should have failed, because members are running");
  } catch (RMIException e) {
    if (!(e.getCause() instanceof ReplyException
        && e.getCause().getCause() instanceof RevokeFailedException)) {
      throw e;
    }
  }

  closeCache(vm0);
  // vm1 keeps running and writes newer data, so vm1's disk files become the latest copy.
  createData(vm1, 0, numBuckets, "b");
  File vm1Directory = getDiskDirectory(vm1);
  closeCache(vm1);

  vm0.invoke(new SerializableRunnable("get cache") {
    public void run() {
      getCache();
    }
  });

  // Revoke vm1's (latest) disk store so vm0 can recover with its older data.
  revokeMember(vm2, vm1Directory);

  AsyncInvocation a1 = createPRAsync(vm0, 1);
  a1.getResult(MAX_WAIT);
  assertEquals(vm0Buckets, getBucketList(vm0));
  checkData(vm0, 0, numBuckets, "a");
  createData(vm0, numBuckets, 113, "b");
  checkData(vm0, numBuckets, 113, "b");

  IgnoredException ex =
      IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
  // BUGFIX: perform the removal in a finally block; previously a failure in the try body
  // skipped ex.remove() and leaked the suppression into subsequent tests.
  try {
    createPR(vm1, 1);
    // BUGFIX: the old message was misspelled ("recieved") and named the wrong exception
    // ("SplitDistributedSystemException"); the test expects RevokedPersistentDataException.
    fail("Should have received a RevokedPersistentDataException");
  } catch (RMIException e) {
    // We revoked this member.
    if (!(e.getCause() instanceof RevokedPersistentDataException)) {
      throw e;
    }
  } finally {
    ex.remove();
  }
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by Apache: class PersistentPartitionedRegionDUnitTest, method checkReadWriteOperationsWithOfflineMember.
/**
 * Verifies read and write behavior on a PR while one member's buckets are offline: operations
 * that touch only the online bucket (aVM0Bucket) succeed, while any read, write, function
 * execution, query, or iteration that needs the offline bucket (aVM1Bucket) must fail with a
 * PartitionOfflineException.
 */
private void checkReadWriteOperationsWithOfflineMember(VM vm0, final int aVM0Bucket, final int aVM1Bucket) {
  // This should work, because this bucket is still available.
  checkData(vm0, aVM0Bucket, aVM0Bucket + 1, "a");
  try {
    checkData(vm0, aVM1Bucket, aVM1Bucket + 1, null);
    fail("Should not have been able to read from missing buckets!");
  } catch (RMIException e) {
    // We expect a PartitionOfflineException
    if (!(e.getCause() instanceof PartitionOfflineException)) {
      throw e;
    }
  }
  IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
  // BUGFIX: remove the ignored exception in a finally block; previously any assertion failure
  // below skipped expect.remove() and leaked the suppression into subsequent tests.
  try {
    // Try a function execution
    vm0.invoke(new SerializableRunnable("Test ways to read") {
      public void run() {
        Cache cache = getCache();
        Region region = cache.getRegion(PR_REGION_NAME);
        // Region-wide function execution needs all buckets, so it must fail.
        try {
          FunctionService.onRegion(region).execute(new TestFunction());
          fail("Should not have been able to read from missing buckets!");
        } catch (PartitionOfflineException e) {
          // expected
        }
        // This should work, because this bucket is still available.
        FunctionService.onRegion(region).withFilter(Collections.singleton(aVM0Bucket)).execute(new TestFunction());
        // This should fail, because this bucket is offline
        try {
          FunctionService.onRegion(region).withFilter(Collections.singleton(aVM1Bucket)).execute(new TestFunction());
          fail("Should not have been able to read from missing buckets!");
        } catch (PartitionOfflineException e) {
          // expected
        }
        // This should fail, because a bucket is offline (mixed online/offline filter)
        try {
          HashSet filter = new HashSet();
          filter.add(aVM0Bucket);
          filter.add(aVM1Bucket);
          FunctionService.onRegion(region).withFilter(filter).execute(new TestFunction());
          fail("Should not have been able to read from missing buckets!");
        } catch (PartitionOfflineException e) {
          // expected
        }
        // This should fail, because a bucket is offline
        try {
          FunctionService.onRegion(region).execute(new TestFunction());
          fail("Should not have been able to read from missing buckets!");
        } catch (PartitionOfflineException e) {
          // expected
        }
        // A full-region query must also fail.
        try {
          cache.getQueryService().newQuery("select * from /" + PR_REGION_NAME).execute();
          fail("Should not have been able to read from missing buckets!");
        } catch (PartitionOfflineException e) {
          // expected
        } catch (QueryException e) {
          throw new RuntimeException(e);
        }
        try {
          Set keys = region.keySet();
          // iterate over all of the keys; iteration forces access to the offline bucket
          for (Object key : keys) {
          }
          fail("Should not have been able to iterate over keyset");
        } catch (PartitionOfflineException e) {
          // expected
        }
        try {
          // iterate over all of the values; iteration forces access to the offline bucket
          for (Object key : region.values()) {
          }
          fail("Should not have been able to iterate over set");
        } catch (PartitionOfflineException e) {
          // expected
        }
        try {
          // iterate over all of the entries; iteration forces access to the offline bucket
          for (Object key : region.entrySet()) {
          }
          fail("Should not have been able to iterate over set");
        } catch (PartitionOfflineException e) {
          // expected
        }
        // Single-key operations against the offline bucket must all fail.
        try {
          region.get(aVM1Bucket);
          fail("Should not have been able to get an offline key");
        } catch (PartitionOfflineException e) {
          // expected
        }
        try {
          region.containsKey(aVM1Bucket);
          fail("Should not have been able to get an offline key");
        } catch (PartitionOfflineException e) {
          // expected
        }
        try {
          region.getEntry(aVM1Bucket);
          fail("Should not have been able to get an offline key");
        } catch (PartitionOfflineException e) {
          // expected
        }
        try {
          region.invalidate(aVM1Bucket);
          fail("Should not have been able to get an offline key");
        } catch (PartitionOfflineException e) {
          // expected
        }
        try {
          region.destroy(aVM1Bucket);
          fail("Should not have been able to get an offline key");
        } catch (PartitionOfflineException e) {
          // expected
        }
      }
    });
    try {
      createData(vm0, aVM1Bucket, aVM1Bucket + 1, "b");
      fail("Should not have been able to write to missing buckets!");
    } catch (RMIException e) {
      // We expect to see a partition offline exception here.
      if (!(e.getCause() instanceof PartitionOfflineException)) {
        throw e;
      }
    }
  } finally {
    expect.remove();
  }
}
Use of org.apache.geode.test.dunit.IgnoredException in project geode by Apache: class PersistentPartitionedRegionDUnitTest, method testBadSerializationInAsyncThread.
/**
 * Test for bug #49972 - handle a serialization error in the async writer thread.
 */
@Ignore("Bug 50376")
@Test
public void testBadSerializationInAsyncThread() throws Throwable {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  final int numBuckets = 50;

  // Arrange for serialization to fail only in vm0's async writer thread.
  vm0.invoke(new SerializableRunnable() {
    @Override
    public void run() {
      FAIL_IN_THIS_VM = true;
    }
  });

  IgnoredException ignoredFatal = IgnoredException.addIgnoredException("Fatal error from asynch");
  IgnoredException ignoredToData = IgnoredException.addIgnoredException("ToDataException");
  try {
    int redundancy = 1;
    createPR(vm0, redundancy, -1, 113, false);
    createPR(vm2, redundancy, -1, 113, false);
    // Trigger bucket creation
    createData(vm0, 0, numBuckets, "a");
    createPR(vm1, redundancy, -1, 113, false);

    // write objects which will fail serialization in async writer thread.
    vm0.invoke(new SerializableRunnable() {
      public void run() {
        Cache cache = getCache();
        Region region = cache.getRegion(PR_REGION_NAME);
        try {
          for (int key = 0; key < numBuckets; key++) {
            region.put(key, new BadSerializer());
            // this will trigger a deserialization (could have also done this with a function)
            region.get(key);
          }
        } catch (DiskAccessException dae) {
          // Once the flusher dies from the serialization failure this is the expected outcome;
          // anything else is a real problem.
          if (!dae.getMessage().contains("the flusher thread had been terminated")) {
            throw dae;
          }
        }
      }
    });

    // Wait for the thread to get hosed.
    Thread.sleep(2000);

    createData(vm1, 0, numBuckets, "b");
    // Try to do puts from vm1, which doesn't have any buckets
    createData(vm1, numBuckets, numBuckets * 2, "b");
    createData(vm1, numBuckets, numBuckets * 2, "c");

    // make sure everything has settled out (these VMs may have been terminated)
    checkData(vm2, 0, numBuckets, "b");
    checkData(vm2, numBuckets, numBuckets * 2, "c");
  } finally {
    ignoredFatal.remove();
    ignoredToData.remove();
  }
}
Aggregations