Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter in project geode by apache.
From the class PRQueryDUnitHelper, method getCacheSerializableRunnableForCacheClose.
/**
 * This function <br>
 * 1. calls cache.close on the VM <br>
 * 2. creates the cache again, along with the PR <br>
 *
 * @return a CacheSerializableRunnable object
 *
 * NOTE: Closing of the cache must be done from the test case rather than in
 * PRQueryDUnitHelper
 */
public CacheSerializableRunnable getCacheSerializableRunnableForCacheClose(final String regionName, final int redundancy, final Class constraint) {
  CacheSerializableRunnable prRegion = new CacheSerializableRunnable("cacheClose") {
    @Override
    public void run2() throws CacheException {
      final String expectedCacheClosedException = CacheClosedException.class.getName();
      final String expectedReplyException = ReplyException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + expectedCacheClosedException + "</ExpectedException>");
      getCache().getLogger().info("<ExpectedException action=add>" + expectedReplyException + "</ExpectedException>");
      Cache cache = getCache();
      org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Recreating the cache");
      AttributesFactory attr = new AttributesFactory();
      attr.setValueConstraint(constraint);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
      attr.setPartitionAttributes(prAttr);
      final CountDownLatch cdl = new CountDownLatch(1);
      ResourceObserverAdapter observer = new InternalResourceManager.ResourceObserverAdapter() {
        @Override
        public void recoveryFinished(Region region) {
          cdl.countDown();
        }
      };
      InternalResourceManager.setResourceObserver(observer);
      try {
        cache.createRegion(regionName, attr.create());
        // Wait for redundancy recovery to finish before returning
        cdl.await();
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      } finally {
        InternalResourceManager.setResourceObserver(null);
      }
      org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: cache recreated on VM");
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedReplyException + "</ExpectedException>");
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedCacheClosedException + "</ExpectedException>");
    }
  };
  return prRegion;
}
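The pattern above registers a ResourceObserverAdapter before creating the region and blocks on a latch until recoveryFinished fires, so the runnable only returns once redundancy recovery is complete. A minimal sketch of how a test might drive this helper follows; the helper instance name, region name, and value constraint (Portfolio) are illustrative assumptions, while the dunit calls (Host.getHost, VM.invoke) and the helper's signature come from the snippet above.

Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
// Per the NOTE in the javadoc, the helper does not close the cache itself,
// so the test closes it first...
vm0.invoke(new CacheSerializableRunnable("closeCache") {
  @Override
  public void run2() throws CacheException {
    getCache().close();
  }
});
// ...then recreates the cache and the PR, blocking until recovery finishes.
// "helper" is an assumed PRQueryDUnitHelper instance held by the test.
vm0.invoke(helper.getCacheSerializableRunnableForCacheClose("portfolios", 1, Portfolio.class));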
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter in project geode by apache.
From the class RebalanceOperationDUnitTest, method testMembershipChange.
/**
* Test that the rebalancing operation picks up on a concurrent membership change
*/
@Test
public void testMembershipChange() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);
  final SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(0);
      paf.setRecoveryDelay(-1);
      paf.setStartupRecoveryDelay(-1);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
    }
  };
  // Create the region in only 1 VM
  vm0.invoke(createPrRegion);
  // Create some buckets
  vm0.invoke(new SerializableRunnable("createSomeBuckets") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      for (int i = 1; i <= 6; i++) {
        region.put(Integer.valueOf(i), "A");
      }
    }
  });
  // Create the region in the other VM (should have no effect)
  vm1.invoke(createPrRegion);
  // Now do a rebalance, but start another member in the middle
  vm0.invoke(new SerializableCallable("do rebalance") {
    public Object call() throws Exception {
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      InternalResourceManager manager = cache.getInternalResourceManager();
      InternalResourceManager.setResourceObserver(new ResourceObserverAdapter() {
        boolean firstBucket = true;

        @Override
        public void movingBucket(Region region, int bucketId, DistributedMember source, DistributedMember target) {
          // As soon as the first bucket starts moving, bring up a third
          // member to force a membership change mid-rebalance.
          if (firstBucket) {
            firstBucket = false;
            vm2.invoke(createPrRegion);
          }
        }
      });
      RebalanceResults results = doRebalance(false, manager);
      assertEquals(0, results.getTotalBucketCreatesCompleted());
      assertEquals(0, results.getTotalPrimaryTransfersCompleted());
      assertEquals(4, results.getTotalBucketTransfersCompleted());
      assertTrue(0 < results.getTotalBucketTransferBytes());
      Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
      assertEquals(1, detailSet.size());
      PartitionRebalanceInfo details = detailSet.iterator().next();
      assertEquals(0, details.getBucketCreatesCompleted());
      assertEquals(0, details.getPrimaryTransfersCompleted());
      assertTrue(0 < details.getBucketTransferBytes());
      assertEquals(4, details.getBucketTransfersCompleted());
      Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
      // There should have been only 2 members when the rebalancing started.
      assertEquals(2, beforeDetails.size());
      // Once it is done, there should be 3 members.
      Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
      assertEquals(3, afterDetails.size());
      for (PartitionMemberInfo memberDetails : afterDetails) {
        assertEquals(2, memberDetails.getBucketCount());
        assertEquals(2, memberDetails.getPrimaryCount());
      }
      verifyStats(manager, results);
      ResourceManagerStats stats = manager.getStats();
      assertEquals(1, stats.getRebalanceMembershipChanges());
      return null;
    }
  });
}
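The test delegates to a doRebalance helper that is defined elsewhere in RebalanceOperationDUnitTest and not shown here. A plausible minimal version, sketched from Geode's public ResourceManager API (createRebalanceFactory, RebalanceOperation.getResults), might look like the following; treat the body as an assumption rather than the actual helper:

private RebalanceResults doRebalance(boolean simulate, ResourceManager manager) throws InterruptedException {
  // Either simulate the rebalance or actually move buckets.
  RebalanceOperation op = simulate
      ? manager.createRebalanceFactory().simulate()
      : manager.createRebalanceFactory().start();
  // getResults() blocks until the rebalance operation completes.
  RebalanceResults results = op.getResults();
  // No rebalance should still be in flight once results are returned.
  assertTrue(manager.getRebalanceOperations().isEmpty());
  return results;
}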
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter in project geode by apache.
From the class PartitionedRegionDelayedRecoveryDUnitTest, method testDelay.
// GEODE-860: time sensitive, thread unsafe test hook, CountDownLatch,
// InterruptedException
@Category(FlakyTest.class)
@Test
public void testDelay() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  SerializableRunnable createPrRegions = new SerializableRunnable("createRegions") {
    public void run() {
      final CountDownLatch rebalancingFinished = new CountDownLatch(1);
      InternalResourceManager.setResourceObserver(new ResourceObserverAdapter() {
        @Override
        public void rebalancingOrRecoveryFinished(Region region) {
          rebalancingFinished.countDown();
        }
      });
      try {
        Cache cache = getCache();
        AttributesFactory attr = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRecoveryDelay(5000);
        paf.setRedundantCopies(1);
        PartitionAttributes prAttr = paf.create();
        attr.setPartitionAttributes(prAttr);
        cache.createRegion("region1", attr.create());
        if (!rebalancingFinished.await(60000, TimeUnit.MILLISECONDS)) {
          fail("Redundancy recovery did not happen within 60 seconds");
        }
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      } finally {
        InternalResourceManager.setResourceObserver(null);
      }
    }
  };
  // create the region in 2 VMs
  vm0.invoke(createPrRegions);
  vm1.invoke(createPrRegions);
  // Do 1 put, which should create 1 bucket
  vm0.invoke(new SerializableRunnable("putData") {
    public void run() {
      Cache cache = getCache();
      PartitionedRegion region1 = (PartitionedRegion) cache.getRegion("region1");
      region1.put("A", "B");
    }
  });
  // create the region in a third VM, which won't have any buckets
  vm2.invoke(createPrRegions);
  final long begin = System.currentTimeMillis();
  // close 1 cache, which should make the bucket drop below
  // the expected redundancy level
  vm1.invoke(new SerializableRunnable("close cache") {
    public void run() {
      Cache cache = getCache();
      cache.close();
    }
  });
  long elapsed = waitForBucketRecovery(vm2, 1, begin);
  assertTrue("Did not wait at least 5 seconds to create the bucket. Elapsed=" + elapsed, elapsed >= 5000);
}
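waitForBucketRecovery is another helper defined elsewhere in the test class. A hedged sketch, assuming it polls the managed-bucket count inside the target VM and returns the elapsed time once the expected count is reached (the polling loop and 100 ms sleep are assumptions):

private long waitForBucketRecovery(VM vm, final int numBuckets, long begin) {
  vm.invoke(new SerializableRunnable("waitForBuckets") {
    public void run() {
      Cache cache = getCache();
      PartitionedRegion region1 = (PartitionedRegion) cache.getRegion("region1");
      // Poll until the data store manages the expected number of buckets.
      while (region1.getDataStore().getBucketsManaged() < numBuckets) {
        try {
          Thread.sleep(100);
        } catch (InterruptedException e) {
          Assert.fail("interrupted", e);
        }
      }
    }
  });
  return System.currentTimeMillis() - begin;
}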
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter in project geode by apache.
From the class PartitionedRegionHADUnitTest, method testBucketFailOverDuringCacheClose.
/**
* Test to ensure that we have proper bucket failover, with no data loss, in the face of
* sequential cache.close() events.
*
* @throws Exception
*/
@Test
public void testBucketFailOverDuringCacheClose() throws Exception {
  final String regionName = getUniqueName();
  final Boolean value = Boolean.TRUE;
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  VM vm3 = host.getVM(3);
  CacheSerializableRunnable createPR = new CacheSerializableRunnable("createRegion") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      final CountDownLatch rebalancingFinished = new CountDownLatch(1);
      InternalResourceManager.setResourceObserver(new ResourceObserverAdapter() {
        @Override
        public void rebalancingOrRecoveryFinished(Region region) {
          rebalancingFinished.countDown();
        }
      });
      try {
        Region partitionedregion = cache.createRegion(regionName, createRegionAttributesForPR(1, 20));
        if (!rebalancingFinished.await(60000, TimeUnit.MILLISECONDS)) {
          fail("Redundancy recovery did not happen within 60 seconds");
        }
        assertNotNull(partitionedregion);
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      } finally {
        InternalResourceManager.setResourceObserver(null);
      }
    }
  };
  vm2.invoke(createPR);
  vm3.invoke(createPR);
  vm3.invoke(new CacheSerializableRunnable("createPRBuckets") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
      assertTrue(pr.isEmpty());
      // Create keys such that all buckets are created; Integer keys work well
      // assuming buckets are allocated on the mod of the key hashCode, x 2 just to be safe
      final int numEntries = pr.getTotalNumberOfBuckets() * 2;
      for (int i = numEntries; i >= 0; --i) {
        pr.put(Integer.valueOf(i), value);
      }
      assertEquals(numEntries + 1, pr.size());
      assertEquals(pr.getTotalNumberOfBuckets(), pr.getRegionAdvisor().getBucketSet().size());
    }
  });
  CacheSerializableRunnable existsEntryCheck = new CacheSerializableRunnable("PRExistsEntryCheck") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
      for (int i = pr.getTotalNumberOfBuckets() * 2; i >= 0; --i) {
        Integer k = Integer.valueOf(i);
        assertTrue("containsKey for key=" + k, pr.containsKey(k));
        assertEquals("get for key=" + k, value, pr.get(k));
      }
    }
  };
  vm3.invoke(existsEntryCheck);
  vm2.invoke(existsEntryCheck);
  CacheSerializableRunnable closeCache = new CacheSerializableRunnable("PRCloseCache") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      cache.close();
    }
  };
  // origin VM down!
  vm2.invoke(closeCache);
  // origin down, but no data loss
  vm3.invoke(existsEntryCheck);
  // get back to the desired redundancy
  vm0.invoke(createPR);
  // verify no data loss
  vm0.invoke(existsEntryCheck);
  // 2nd oldest VM down!
  vm3.invoke(closeCache);
  // 2nd down, but no data loss
  vm0.invoke(existsEntryCheck);
  // get back (for the 2nd time) to the desired redundancy
  vm1.invoke(createPR);
  // verify no data loss
  vm1.invoke(existsEntryCheck);
  vm0.invoke(existsEntryCheck);
}
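createRegionAttributesForPR(redundancy, localMaxMemory) is a helper of the test class that these snippets do not show. Judging from how the tests call it (passing 0 for local max memory to create an accessor), a minimal sketch might be the following; the exact body is an assumption:

protected RegionAttributes createRegionAttributesForPR(int redundancy, int localMaxMemory) {
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  // localMaxMemory == 0 makes the member an accessor: it hosts no buckets.
  PartitionAttributes prAttr = paf.setRedundantCopies(redundancy)
      .setLocalMaxMemory(localMaxMemory)
      .create();
  attr.setPartitionAttributes(prAttr);
  return attr.create();
}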
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserverAdapter in project geode by apache.
From the class PartitionedRegionHADUnitTest, method testGrabBackupBuckets.
////////// test methods ////////////////
@Test
public void testGrabBackupBuckets() throws Throwable {
  Host host = Host.getHost(0);
  VM dataStore0 = host.getVM(0);
  // VM dataStore1 = host.getVM(1);
  VM dataStore2 = host.getVM(2);
  VM accessor = host.getVM(3);
  final int redundantCopies = 1;
  // Create PRs on 2 VMs
  CacheSerializableRunnable createPRs = new CacheSerializableRunnable("createPrRegions") {
    public void run2() throws CacheException {
      final CountDownLatch recoveryDone = new CountDownLatch(MAX_REGIONS);
      ResourceObserver waitForRecovery = new ResourceObserverAdapter() {
        @Override
        public void rebalancingOrRecoveryFinished(Region region) {
          recoveryDone.countDown();
        }
      };
      InternalResourceManager.setResourceObserver(waitForRecovery);
      try {
        Cache cache = getCache();
        System.setProperty(PartitionedRegion.RETRY_TIMEOUT_PROPERTY, "20000");
        for (int i = 0; i < MAX_REGIONS; i++) {
          cache.createRegion(PR_PREFIX + i, createRegionAttributesForPR(redundantCopies, 200));
        }
        System.setProperty(PartitionedRegion.RETRY_TIMEOUT_PROPERTY, Integer.toString(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
        if (!recoveryDone.await(60, TimeUnit.SECONDS)) {
          fail("recovery didn't happen in 60 seconds");
        }
      } catch (InterruptedException e) {
        Assert.fail("recovery wait interrupted", e);
      } finally {
        InternalResourceManager.setResourceObserver(null);
      }
    }
  };
  CacheSerializableRunnable createAccessor = new CacheSerializableRunnable("createAccessor") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      for (int i = 0; i < MAX_REGIONS; i++) {
        cache.createRegion(PR_PREFIX + i, createRegionAttributesForPR(redundantCopies, 0));
      }
    }
  };
  // Create PRs on only 2 VMs
  dataStore0.invoke(createPRs);
  // dataStore1.invoke(createPRs);
  final String expectedExceptions = PartitionedRegionStorageException.class.getName();
  CacheSerializableRunnable addExpectedExceptions = new CacheSerializableRunnable("addExpectedExceptions") {
    public void run2() throws CacheException {
      getCache().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
      LogWriterUtils.getLogWriter().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
    }
  };
  CacheSerializableRunnable removeExpectedExceptions = new CacheSerializableRunnable("removeExpectedExceptions") {
    public void run2() throws CacheException {
      LogWriterUtils.getLogWriter().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
    }
  };
  // Do put operations on these 2 PRs asynchronously.
  CacheSerializableRunnable dataStore0Puts = new CacheSerializableRunnable("dataStore0PutOperations") {
    public void run2() {
      Cache cache = getCache();
      for (int j = 0; j < MAX_REGIONS; j++) {
        Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + j);
        assertNotNull(pr);
        for (int k = 0; k < 10; k++) {
          pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
        }
        LogWriterUtils.getLogWriter().info("VM0 Done put successfully for PR = " + PR_PREFIX + j);
      }
    }
  };
  // TODO bug36296
  CacheSerializableRunnable dataStore1Puts = new CacheSerializableRunnable("dataStore1PutOperations") {
    public void run2() {
      Cache cache = getCache();
      for (int j = 0; j < MAX_REGIONS; j++) {
        Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + j);
        assertNotNull(pr);
        for (int k = 10; k < 20; k++) {
          pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
        }
        LogWriterUtils.getLogWriter().info("VM1 Done put successfully for PR = " + PR_PREFIX + j);
      }
    }
  };
  dataStore0.invoke(addExpectedExceptions);
  // dataStore1.invoke(addExpectedExceptions);
  AsyncInvocation async0 = dataStore0.invokeAsync(dataStore0Puts);
  // AsyncInvocation async1 = dataStore1.invokeAsync(dataStore1Puts);
  ThreadUtils.join(async0, 30 * 1000);
  // async1.join();
  dataStore0.invoke(removeExpectedExceptions);
  // dataStore1.invoke(removeExpectedExceptions);
  // Verify that buckets cannot be created if there are not enough nodes to support
  // the redundancy configuration
  assertFalse(async0.exceptionOccurred());
  // assertTrue(async0.getException() instanceof PartitionedRegionStorageException);
  // assertTrue(async1.exceptionOccurred());
  // assertTrue(async1.getException() instanceof PartitionedRegionStorageException);
  // At this point the redundancy criterion is not met.
  // Now if we create PRs on more VMs, it should create those "supposed to
  // be redundant" buckets on these nodes, if it can accommodate the data
  // (localMaxMemory > 0).
  dataStore2.invoke(createPRs);
  async0 = dataStore0.invokeAsync(dataStore0Puts);
  // async1 = dataStore1.invokeAsync(dataStore1Puts);
  ThreadUtils.join(async0, 30 * 1000);
  if (async0.exceptionOccurred()) {
    Assert.fail("async0 failed", async0.getException());
  }
  // assertFalse(async1.exceptionOccurred());
  accessor.invoke(createAccessor);
  for (int c = 0; c < MAX_REGIONS; c++) {
    final int ri = c;
    final SerializableCallable validateLocalBucket2RegionMapSize = new SerializableCallable("validateLocalBucket2RegionMapSize") {
      public Object call() throws Exception {
        int size = 0;
        Cache cache = getCache();
        PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + ri);
        if (pr.getDataStore() != null) {
          size = pr.getDataStore().getBucketsManaged();
        }
        return Integer.valueOf(size);
      }
    };
    final SerializableCallable validateBucketsOnNode = new SerializableCallable("validateBucketOnNode") {
      public Object call() throws Exception {
        int containsNode = 0;
        Cache cache = getCache();
        PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + ri);
        Iterator it = pr.getRegionAdvisor().getBucketSet().iterator();
        Set nodeList;
        try {
          while (it.hasNext()) {
            Integer bucketId = (Integer) it.next();
            nodeList = pr.getRegionAdvisor().getBucketOwners(bucketId.intValue());
            if ((nodeList != null) && (nodeList.contains(pr.getMyId()))) {
              containsNode++;
            } else {
              getCache().getLogger().fine("I don't contain member " + pr.getMyId());
            }
          }
        } catch (NoSuchElementException done) {
          // iteration complete; nothing more to count
        }
        return Integer.valueOf(containsNode);
      }
    };
    // int vm0LBRsize = ((Integer) dataStore0.invoke(validateLocalBucket2RegionMapSize)).intValue();
    int vm2LBRsize = ((Integer) dataStore2.invoke(validateLocalBucket2RegionMapSize)).intValue();
    int vm3LBRsize = ((Integer) accessor.invoke(validateLocalBucket2RegionMapSize)).intValue();
    // A zero size would mean the newly started node didn't pick up any buckets.
    assertTrue(vm2LBRsize != 0);
    // The accessor should NOT have picked up any buckets.
    assertEquals(0, vm3LBRsize);
    int vm2B2Nsize = ((Integer) dataStore2.invoke(validateBucketsOnNode)).intValue();
    LogWriterUtils.getLogWriter().info("vm2B2Nsize = " + vm2B2Nsize);
    assertEquals(vm2B2Nsize, vm2LBRsize);
  }
}
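Across all of these usages the recurring idiom is the same: register a ResourceObserverAdapter before creating the partitioned region, count down a latch in the relevant callback (recoveryFinished or rebalancingOrRecoveryFinished), await it with a timeout, and always clear the observer in a finally block, since the hook is a process-wide static. A sketch of that idiom factored into a small reusable observer follows; this class does not exist in Geode and is shown only to summarize the pattern.

class RecoveryLatchObserver extends InternalResourceManager.ResourceObserverAdapter {
  private final CountDownLatch latch;

  RecoveryLatchObserver(int expectedRecoveries) {
    this.latch = new CountDownLatch(expectedRecoveries);
  }

  @Override
  public void rebalancingOrRecoveryFinished(Region region) {
    latch.countDown();
  }

  boolean await(long timeout, TimeUnit unit) throws InterruptedException {
    return latch.await(timeout, unit);
  }
}

// Usage, mirroring the tests above:
// RecoveryLatchObserver observer = new RecoveryLatchObserver(1);
// InternalResourceManager.setResourceObserver(observer);
// try {
//   cache.createRegion(regionName, attributes);
//   assertTrue(observer.await(60, TimeUnit.SECONDS));
// } finally {
//   InternalResourceManager.setResourceObserver(null);
// }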