use of org.apache.geode.test.dunit.RMIException in project geode by apache.
the class PersistentColocatedPartitionedRegionDUnitTest method testParentRegionGetWithRecoveryInProgress.
@Test
public void testParentRegionGetWithRecoveryInProgress() throws Throwable {
  SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
    public void run() {
      // bound the partitioned region retry timeout for this test
      String oldRetryTimeout = System.setProperty(
          DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
      try {
        Cache cache = getCache();
        DiskStore ds = cache.findDiskStore("disk");
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
        }
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(0);
        af.setPartitionAttributes(paf.create());
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        cache.createRegion(PR_REGION_NAME, af.create());
      } finally {
        // note: resets the property to the default rather than restoring the saved value
        System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
            String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
        System.out.println("oldRetryTimeout = " + oldRetryTimeout);
      }
    }
  };
  SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
    public void run() throws InterruptedException {
      String oldRetryTimeout = System.setProperty(
          DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
      try {
        Cache cache = getCache();
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(0);
        paf.setColocatedWith(PR_REGION_NAME);
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        af.setPartitionAttributes(paf.create());
        cache.createRegion("region2", af.create());
      } finally {
        System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
            String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
      }
    }
  };
  boolean caughtException = false;
  try {
    // Expect a get() on the un-recovered (due to offline child) parent region to fail
    regionGetWithOfflineChild(createParentPR, createChildPR, false);
  } catch (Exception e) {
    caughtException = true;
    assertTrue(e instanceof RMIException);
    assertTrue(e.getCause() instanceof PartitionOfflineException);
  }
  if (!caughtException) {
    fail("Expected a PartitionOfflineException from the remote VM");
  }
}
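The helper regionGetWithOfflineChild is not shown in this excerpt. What matters for the RMIException usage is the pattern it enables: dUnit's VM.invoke() runs a SerializableRunnable in a remote JVM over RMI, and any exception thrown there comes back wrapped in an RMIException, so the expected remote failure is asserted via getCause(). A minimal sketch of that pattern, using a hypothetical helper name that is not part of the test class:

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.apache.geode.cache.persistence.PartitionOfflineException;
import org.apache.geode.test.dunit.RMIException;
import org.apache.geode.test.dunit.SerializableRunnable;
import org.apache.geode.test.dunit.VM;

public class RemoteFailureAssertions {
  // Hypothetical helper: assert that a remote operation fails with
  // PartitionOfflineException. dUnit wraps the remote exception in an
  // RMIException, so the assertion is on the cause.
  static void expectPartitionOffline(VM vm, SerializableRunnable work) {
    try {
      vm.invoke(work); // executes in the remote VM via RMI
      fail("expected the remote operation to fail");
    } catch (RMIException e) {
      // the remote PartitionOfflineException comes back as the cause
      assertTrue(e.getCause() instanceof PartitionOfflineException);
    }
  }
}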
use of org.apache.geode.test.dunit.RMIException in project geode by apache.
the class PersistentColocatedPartitionedRegionDUnitTest method testParentRegionGetWithOfflineChildRegion.
@Test
public void testParentRegionGetWithOfflineChildRegion() throws Throwable {
  SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
    public void run() {
      String oldRetryTimeout = System.setProperty(
          DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
      try {
        Cache cache = getCache();
        DiskStore ds = cache.findDiskStore("disk");
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
        }
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(0);
        af.setPartitionAttributes(paf.create());
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        cache.createRegion(PR_REGION_NAME, af.create());
      } finally {
        System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
            String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
      }
    }
  };
  SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
    public void run() throws InterruptedException {
      String oldRetryTimeout = System.setProperty(
          DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
      try {
        Cache cache = getCache();
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(0);
        paf.setColocatedWith(PR_REGION_NAME);
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        af.setPartitionAttributes(paf.create());
        // delay child region creation to cause a delay in persistent recovery
        Thread.sleep(100);
        cache.createRegion("region2", af.create());
      } finally {
        System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
            String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
      }
    }
  };
  boolean caughtException = false;
  try {
    // Expect a get() on the un-recovered (due to offline child) parent region to fail
    regionGetWithOfflineChild(createParentPR, createChildPR, false);
  } catch (Exception e) {
    caughtException = true;
    assertTrue(e instanceof RMIException);
    assertTrue(e.getCause() instanceof PartitionOfflineException);
  }
  if (!caughtException) {
    fail("Expected a PartitionOfflineException from the remote VM");
  }
}
use of org.apache.geode.test.dunit.RMIException in project geode by apache.
the class PersistentColocatedPartitionedRegionDUnitTest method testParentRegionPutWithRecoveryInProgress.
@Test
public void testParentRegionPutWithRecoveryInProgress() throws Throwable {
  SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
    public void run() {
      String oldRetryTimeout = System.setProperty(
          DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
      System.out.println("oldRetryTimeout = " + oldRetryTimeout);
      try {
        Cache cache = getCache();
        DiskStore ds = cache.findDiskStore("disk");
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
        }
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(0);
        af.setPartitionAttributes(paf.create());
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        cache.createRegion(PR_REGION_NAME, af.create());
      } finally {
        System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
            String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
      }
    }
  };
  SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
    public void run() throws InterruptedException {
      String oldRetryTimeout = System.setProperty(
          DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
      try {
        Cache cache = getCache();
        AttributesFactory af = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(0);
        paf.setColocatedWith(PR_REGION_NAME);
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName("disk");
        af.setPartitionAttributes(paf.create());
        // delay child region creation to cause a delay in persistent recovery
        Thread.sleep(1000);
        cache.createRegion("region2", af.create());
      } finally {
        System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout",
            String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
      }
    }
  };
  boolean caughtException = false;
  try {
    // Expect a get() on the un-recovered (due to offline child) parent region to fail
    regionGetWithOfflineChild(createParentPR, createChildPR, false);
  } catch (Exception e) {
    caughtException = true;
    assertTrue(e instanceof RMIException);
    assertTrue(e.getCause() instanceof PartitionOfflineException);
  }
  if (!caughtException) {
    fail("Expected a PartitionOfflineException from the remote VM");
  }
}
use of org.apache.geode.test.dunit.RMIException in project geode by apache.
the class PersistentPartitionedRegionDUnitTest method testRevokeBeforeStartup.
// GEODE-974: async actions, time sensitive, 65 second timeouts
@Category(FlakyTest.class)
@Test
public void testRevokeBeforeStartup() throws Throwable {
  IgnoredException.addIgnoredException("RevokeFailedException");
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  int numBuckets = 50;
  createPR(vm0, 1);
  createPR(vm1, 1);
  createData(vm0, 0, numBuckets, "a");
  Set<Integer> vm0Buckets = getBucketList(vm0);
  Set<Integer> vm1Buckets = getBucketList(vm1);
  assertEquals(vm0Buckets, vm1Buckets);
  // This should fail with a revocation-failed message
  try {
    revokeAllMembers(vm2);
    fail("The revoke should have failed, because members are running");
  } catch (RMIException e) {
    if (!(e.getCause() instanceof ReplyException
        && e.getCause().getCause() instanceof RevokeFailedException)) {
      throw e;
    }
  }
  closeCache(vm0);
  createData(vm1, 0, numBuckets, "b");
  File vm1Directory = getDiskDirectory(vm1);
  closeCache(vm1);
  vm0.invoke(new SerializableRunnable("get cache") {
    public void run() {
      getCache();
    }
  });
  revokeMember(vm2, vm1Directory);
  AsyncInvocation a1 = createPRAsync(vm0, 1);
  a1.getResult(MAX_WAIT);
  assertEquals(vm0Buckets, getBucketList(vm0));
  checkData(vm0, 0, numBuckets, "a");
  createData(vm0, numBuckets, 113, "b");
  checkData(vm0, numBuckets, 113, "b");
  IgnoredException ex =
      IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
  try {
    createPR(vm1, 1);
    fail("Should have received a RevokedPersistentDataException");
  } catch (RMIException e) {
    // We revoked this member.
    if (!(e.getCause() instanceof RevokedPersistentDataException)) {
      throw e;
    }
  }
  ex.remove();
}
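Both catch blocks above discriminate on the wrapped cause: the first expects a ReplyException whose own cause is a RevokeFailedException, the second expects a RevokedPersistentDataException directly. A small hypothetical helper (not part of the test class) that walks the cause chain shows the same idea more generally; note the test above is stricter, since it checks the exact nesting:

// Hypothetical helper: report whether an expected exception type appears
// anywhere in an RMIException's cause chain.
static boolean remoteCauseIs(RMIException e, Class<? extends Throwable> type) {
  for (Throwable t = e.getCause(); t != null; t = t.getCause()) {
    if (type.isInstance(t)) {
      return true;
    }
  }
  return false;
}

// Example use, mirroring the second catch block above:
//   if (!remoteCauseIs(e, RevokedPersistentDataException.class)) { throw e; }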
use of org.apache.geode.test.dunit.RMIException in project geode by apache.
the class PersistentPartitionedRegionDUnitTest method checkReadWriteOperationsWithOfflineMember.
private void checkReadWriteOperationsWithOfflineMember(VM vm0, final int aVM0Bucket,
    final int aVM1Bucket) {
  // This should work, because this bucket is still available.
  checkData(vm0, aVM0Bucket, aVM0Bucket + 1, "a");
  try {
    checkData(vm0, aVM1Bucket, aVM1Bucket + 1, null);
    fail("Should not have been able to read from missing buckets!");
  } catch (RMIException e) {
    // We expect a PartitionOfflineException
    if (!(e.getCause() instanceof PartitionOfflineException)) {
      throw e;
    }
  }
  IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
  // Try a function execution
  vm0.invoke(new SerializableRunnable("Test ways to read") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion(PR_REGION_NAME);
      try {
        FunctionService.onRegion(region).execute(new TestFunction());
        fail("Should not have been able to read from missing buckets!");
      } catch (PartitionOfflineException e) {
        // expected
      }
      // This should work, because this bucket is still available.
      FunctionService.onRegion(region).withFilter(Collections.singleton(aVM0Bucket))
          .execute(new TestFunction());
      // This should fail, because this bucket is offline
      try {
        FunctionService.onRegion(region).withFilter(Collections.singleton(aVM1Bucket))
            .execute(new TestFunction());
        fail("Should not have been able to read from missing buckets!");
      } catch (PartitionOfflineException e) {
        // expected
      }
      // This should fail, because one bucket in the filter is offline
      try {
        HashSet filter = new HashSet();
        filter.add(aVM0Bucket);
        filter.add(aVM1Bucket);
        FunctionService.onRegion(region).withFilter(filter).execute(new TestFunction());
        fail("Should not have been able to read from missing buckets!");
      } catch (PartitionOfflineException e) {
        // expected
      }
      // This should fail, because a bucket is offline
      try {
        FunctionService.onRegion(region).execute(new TestFunction());
        fail("Should not have been able to read from missing buckets!");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        cache.getQueryService().newQuery("select * from /" + PR_REGION_NAME).execute();
        fail("Should not have been able to read from missing buckets!");
      } catch (PartitionOfflineException e) {
        // expected
      } catch (QueryException e) {
        throw new RuntimeException(e);
      }
      try {
        // iterate over all of the keys
        for (Object key : region.keySet()) {
        }
        fail("Should not have been able to iterate over the key set");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        // iterate over all of the values
        for (Object value : region.values()) {
        }
        fail("Should not have been able to iterate over the values");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        // iterate over all of the entries
        for (Object entry : region.entrySet()) {
        }
        fail("Should not have been able to iterate over the entry set");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        region.get(aVM1Bucket);
        fail("Should not have been able to get an offline key");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        region.containsKey(aVM1Bucket);
        fail("Should not have been able to check containsKey on an offline key");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        region.getEntry(aVM1Bucket);
        fail("Should not have been able to get the entry for an offline key");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        region.invalidate(aVM1Bucket);
        fail("Should not have been able to invalidate an offline key");
      } catch (PartitionOfflineException e) {
        // expected
      }
      try {
        region.destroy(aVM1Bucket);
        fail("Should not have been able to destroy an offline key");
      } catch (PartitionOfflineException e) {
        // expected
      }
    }
  });
  try {
    createData(vm0, aVM1Bucket, aVM1Bucket + 1, "b");
    fail("Should not have been able to write to missing buckets!");
  } catch (RMIException e) {
    // We expect to see a PartitionOfflineException here.
    if (!(e.getCause() instanceof PartitionOfflineException)) {
      throw e;
    }
  }
  expect.remove();
}
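TestFunction is not shown in this excerpt. For the function-execution checks above, it only needs to read data in the buckets it is routed to; when a targeted bucket is offline, the execute() call fails on the caller with a PartitionOfflineException. A hedged sketch of such a function (the body is an assumption, not the real class):

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.partition.PartitionRegionHelper;

public class TestFunction implements Function {

  @Override
  public void execute(FunctionContext context) {
    RegionFunctionContext rfc = (RegionFunctionContext) context;
    // Read the local data for the buckets this execution was routed to.
    Region localData = PartitionRegionHelper.getLocalDataForContext(rfc);
    context.getResultSender().lastResult(localData.size());
  }

  @Override
  public String getId() {
    return "TestFunction";
  }

  @Override
  public boolean hasResult() {
    return true;
  }

  @Override
  public boolean optimizeForWrite() {
    return false;
  }

  @Override
  public boolean isHA() {
    return false;
  }
}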