Use of org.apache.geode.cache.persistence.PartitionOfflineException in project geode by apache.
From the class PersistentColocatedPartitionedRegionDUnitTest, method testParentRegionPutWithRecoveryInProgress.
@Test
public void testParentRegionPutWithRecoveryInProgress() throws Throwable {
SerializableRunnable createParentPR = new SerializableRunnable("createParentPR") {
public void run() {
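// Shorten the partitioned region retry timeout (restored to its default in the finally block)
// so operations against offline buckets fail quickly instead of retrying for the full default period.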
String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
System.out.println("oldRetryTimeout = " + oldRetryTimeout);
try {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(0);
paf.setRecoveryDelay(0);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion(PR_REGION_NAME, af.create());
} finally {
System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
}
}
};
SerializableRunnable createChildPR = new SerializableRunnable("createChildPR") {
public void run() throws InterruptedException {
String oldRetryTimeout = System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", "10000");
try {
Cache cache = getCache();
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(0);
paf.setRecoveryDelay(0);
paf.setColocatedWith(PR_REGION_NAME);
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
af.setPartitionAttributes(paf.create());
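// Delay the colocated child region's creation so recovery is still in progress when the parent region is exercised.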
Thread.sleep(1000);
cache.createRegion("region2", af.create());
} finally {
System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "partitionedRegionRetryTimeout", String.valueOf(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
}
}
};
boolean caughtException = false;
try {
// Expect a get() on the un-recovered (due to offline child) parent region to fail
regionGetWithOfflineChild(createParentPR, createChildPR, false);
} catch (Exception e) {
caughtException = true;
assertTrue(e instanceof RMIException);
assertTrue(e.getCause() instanceof PartitionOfflineException);
}
if (!caughtException) {
fail("Expected TimeoutException from remote");
}
}
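A minimal sketch (not part of the test above; the helper name is hypothetical) of the unwrap pattern this test asserts: DUnit runs the runnables in other VMs, so the remote PartitionOfflineException arrives wrapped in an RMIException and has to be unwrapped via getCause().

import org.apache.geode.cache.persistence.PartitionOfflineException;
import org.apache.geode.test.dunit.RMIException;

class RemoteOfflineAssertions {
  // Fails unless the exception is a remote (RMI-wrapped) PartitionOfflineException.
  static void assertRemotePartitionOffline(Exception e) {
    if (!(e instanceof RMIException) || !(e.getCause() instanceof PartitionOfflineException)) {
      throw new AssertionError("expected a remote PartitionOfflineException", e);
    }
  }
}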
Use of org.apache.geode.cache.persistence.PartitionOfflineException in project geode by apache.
From the class PersistentPartitionedRegionDUnitTest, method testCleanupAfterConflict.
@Test
public void testCleanupAfterConflict() throws Exception {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
createPR(vm0, 0);
// create some buckets
createData(vm0, 0, 2, "a");
closePR(vm0);
createPR(vm1, 0);
// create an overlapping bucket
createData(vm1, 1, 2, "a");
IgnoredException[] expectVm0 = { IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0), IgnoredException.addIgnoredException("CacheClosedException", vm0) };
try {
// This results in ConflictingPersistentDataException. As part of
// GEODE-2918, the cache is closed, when ConflictingPersistentDataException
// is encountered.
createPR(vm0, 0);
fail("should have seen a conflicting data exception");
} catch (Exception ex) {
boolean expectedException = false;
if (ex.getCause() instanceof CacheClosedException) {
CacheClosedException cce = (CacheClosedException) ex.getCause();
if (cce.getCause() instanceof ConflictingPersistentDataException) {
expectedException = true;
}
}
if (!expectedException) {
throw ex;
}
} finally {
for (IgnoredException ie : expectVm0) {
ie.remove();
}
}
IgnoredException expectVm1 = IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
try {
createData(vm1, 0, 1, "a");
} catch (Exception e) {
// Expected: bucket 0's only persistent copy lives on vm0, which is offline until it restarts,
// so this write may fail with PartitionOfflineException.
if (!(e.getCause() instanceof PartitionOfflineException)) {
throw e;
}
} finally {
expectVm1.remove();
}
closePR(vm1);
// This should succeed, vm0 should not have persisted any view
// information from vm1
createPR(vm0, 0);
checkData(vm0, 0, 2, "a");
checkData(vm0, 2, 3, null);
}
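The cause-chain check above reflects GEODE-2918: the ConflictingPersistentDataException arrives as the cause of the CacheClosedException thrown when the cache shuts itself down. A small sketch (a hypothetical helper, not part of the Geode test) that factors out that walk:

import org.apache.geode.cache.persistence.ConflictingPersistentDataException;

final class PersistentConflictCheck {
  private PersistentConflictCheck() {}

  // Walks the cause chain looking for a persistent-data conflict; per GEODE-2918 it is
  // typically nested under the CacheClosedException raised during the forced cache close.
  static boolean isPersistentConflict(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof ConflictingPersistentDataException) {
        return true;
      }
    }
    return false;
  }
}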
Use of org.apache.geode.cache.persistence.PartitionOfflineException in project geode by apache.
From the class PersistentPartitionedRegionDUnitTest, method checkReadWriteOperationsWithOfflineMember.
private void checkReadWriteOperationsWithOfflineMember(VM vm0, final int aVM0Bucket, final int aVM1Bucket) {
// This should work, because this bucket is still available.
checkData(vm0, aVM0Bucket, aVM0Bucket + 1, "a");
try {
checkData(vm0, aVM1Bucket, aVM1Bucket + 1, null);
fail("Should not have been able to read from missing buckets!");
} catch (RMIException e) {
// We expect a PartitionOfflineException
if (!(e.getCause() instanceof PartitionOfflineException)) {
throw e;
}
}
IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
// Try a function execution
vm0.invoke(new SerializableRunnable("Test ways to read") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion(PR_REGION_NAME);
try {
FunctionService.onRegion(region).execute(new TestFunction());
fail("Should not have been able to read from missing buckets!");
} catch (PartitionOfflineException e) {
// expected
}
// This should work, because this bucket is still available.
FunctionService.onRegion(region).withFilter(Collections.singleton(aVM0Bucket)).execute(new TestFunction());
// This should fail, because this bucket is offline
try {
FunctionService.onRegion(region).withFilter(Collections.singleton(aVM1Bucket)).execute(new TestFunction());
fail("Should not have been able to read from missing buckets!");
} catch (PartitionOfflineException e) {
// expected
}
// This should fail, because a bucket is offline
try {
HashSet filter = new HashSet();
filter.add(aVM0Bucket);
filter.add(aVM1Bucket);
FunctionService.onRegion(region).withFilter(filter).execute(new TestFunction());
fail("Should not have been able to read from missing buckets!");
} catch (PartitionOfflineException e) {
// expected
}
// This should fail, because a bucket is offline
try {
FunctionService.onRegion(region).execute(new TestFunction());
fail("Should not have been able to read from missing buckets!");
} catch (PartitionOfflineException e) {
// expected
}
try {
cache.getQueryService().newQuery("select * from /" + PR_REGION_NAME).execute();
fail("Should not have been able to read from missing buckets!");
} catch (PartitionOfflineException e) {
// expected
} catch (QueryException e) {
throw new RuntimeException(e);
}
try {
Set keys = region.keySet();
// iterate over all of the keys
for (Object key : keys) {
}
fail("Should not have been able to iterate over keyset");
} catch (PartitionOfflineException e) {
// expected
}
try {
// iterate over all of the values
for (Object value : region.values()) {
}
fail("Should not have been able to iterate over set");
} catch (PartitionOfflineException e) {
// expected
}
try {
// iterate over all of the entries
for (Object entry : region.entrySet()) {
}
fail("Should not have been able to iterate over set");
} catch (PartitionOfflineException e) {
// expected
}
try {
region.get(aVM1Bucket);
fail("Should not have been able to get an offline key");
} catch (PartitionOfflineException e) {
// expected
}
try {
region.containsKey(aVM1Bucket);
fail("Should not have been able to get an offline key");
} catch (PartitionOfflineException e) {
// expected
}
try {
region.getEntry(aVM1Bucket);
fail("Should not have been able to get an offline key");
} catch (PartitionOfflineException e) {
// expected
}
try {
region.invalidate(aVM1Bucket);
fail("Should not have been able to get an offline key");
} catch (PartitionOfflineException e) {
// expected
}
try {
region.destroy(aVM1Bucket);
fail("Should not have been able to get an offline key");
} catch (PartitionOfflineException e) {
// expected
}
}
});
try {
createData(vm0, aVM1Bucket, aVM1Bucket + 1, "b");
fail("Should not have been able to write to missing buckets!");
} catch (RMIException e) {
// We expect to see a partition offline exception here.
if (!(e.getCause() instanceof PartitionOfflineException)) {
throw e;
}
}
expect.remove();
}
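The TestFunction referenced above is a helper defined elsewhere in the test class and is not shown here. As a rough sketch under that assumption (the class below is hypothetical, not the actual helper), a minimal read-only function that exercises the same offline-bucket path could look like this:

import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.partition.PartitionRegionHelper;

class TestReadFunction implements Function {
  @Override
  public void execute(FunctionContext context) {
    // Touch the locally hosted data so the function actually reads buckets; if a targeted
    // bucket has no online persistent copy, the caller sees a PartitionOfflineException
    // instead of a result.
    RegionFunctionContext rfc = (RegionFunctionContext) context;
    int localSize = PartitionRegionHelper.getLocalDataForContext(rfc).size();
    context.getResultSender().lastResult(localSize);
  }

  @Override
  public String getId() {
    return TestReadFunction.class.getName();
  }

  @Override
  public boolean hasResult() {
    return true;
  }

  @Override
  public boolean optimizeForWrite() {
    return false;
  }

  @Override
  public boolean isHA() {
    return false;
  }
}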
Use of org.apache.geode.cache.persistence.PartitionOfflineException in project geode by apache.
From the class PersistentPartitionedRegionDUnitTest, method testBug42226.
/**
 * Test for bug 42226.
 * 1. Member A has the bucket.
 * 2. Member B starts creating the bucket. It tells member A that it hosts the bucket.
 * 3. Member A crashes.
 * 4. Member B destroys the bucket and throws a partition offline exception, because it
 *    wasn't able to complete initialization.
 * 5. Member A recovers, and gets stuck waiting for member B.
 *
 * @throws Exception
 */
// GEODE-1208: time sensitive, multiple non-thread-safe test hooks, async actions
@Category(FlakyTest.class)
@Test
public void testBug42226() throws Exception {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
// Add a hook which will disconnect from the distributed
// system when the initial image message shows up.
vm0.invoke(new SerializableRunnable() {
public void run() {
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
if (message instanceof RequestImageMessage) {
RequestImageMessage rim = (RequestImageMessage) message;
// Don't disconnect until we see a bucket
if (rim.regionPath.contains("_B_")) {
DistributionMessageObserver.setInstance(null);
disconnectFromDS();
}
}
}
@Override
public void afterProcessMessage(DistributionManager dm, DistributionMessage message) {
}
});
}
});
LogWriterUtils.getLogWriter().info("Creating region in VM0");
createPR(vm0, 1, 0, 1);
// Make sure we create a bucket
createData(vm0, 0, 1, "a");
// This should recover redundancy, which should cause vm0 to disconnect
IgnoredException ex = IgnoredException.addIgnoredException("PartitionOfflineException");
try {
LogWriterUtils.getLogWriter().info("Creating region in VM1");
createPR(vm1, 1, 0, 1);
// Make sure get a partition offline exception
try {
createData(vm1, 0, 1, "a");
} catch (RMIException e) {
// We expect a PartitionOfflineException
if (!(e.getCause() instanceof PartitionOfflineException)) {
throw e;
}
}
} finally {
ex.remove();
}
// Make sure vm0 is really disconnected (avoids a race with the observer).
vm0.invoke(new SerializableRunnable() {
public void run() {
disconnectFromDS();
}
});
// This should recreate the bucket
AsyncInvocation async1 = createPRAsync(vm0, 1, 0, 1);
async1.getResult(MAX_WAIT);
checkData(vm1, 0, 1, "a");
}
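createPR, createData, and checkData are DUnit helpers defined elsewhere in the test class and are not shown here. As a hedged sketch of what createPR plausibly sets up in the target VM (an assumption about the helper, not its actual code), the public-API equivalent of a persistent partitioned region with one redundant copy and immediate recovery is:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
import org.apache.geode.cache.RegionShortcut;

class PersistentPrSketch {
  // Persistent partitioned region with the given redundancy and immediate bucket recovery,
  // roughly what the createPR helper is assumed to build.
  static Region createPersistentPR(Cache cache, String regionName, int redundantCopies) {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies);
    paf.setRecoveryDelay(0);
    RegionFactory rf = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
    rf.setPartitionAttributes(paf.create());
    return rf.create(regionName);
  }
}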