Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver in project geode by apache.
In class ShutdownAllDUnitTest, the method getCreatePRRunnable:
private SerializableRunnable getCreatePRRunnable(final String regionName,
    final String diskStoreName, final int redundancy) {
  return new SerializableRunnable("create pr") {
    @Override
    public void run() {
      // When the region is redundant, install an observer so we can wait for
      // redundancy recovery to finish before this runnable returns.
      final CountDownLatch recoveryDone;
      if (redundancy > 0) {
        recoveryDone = new CountDownLatch(1);
        ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
          @Override
          public void recoveryFinished(Region region) {
            recoveryDone.countDown();
          }
        };
        InternalResourceManager.setResourceObserver(observer);
      } else {
        recoveryDone = null;
      }
      Cache cache = ShutdownAllDUnitTest.this.getCache();
      if (diskStoreName != null) {
        DiskStore ds = cache.findDiskStore(diskStoreName);
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(diskStoreName);
        }
      }
      AttributesFactory af = new AttributesFactory();
      // use async to trigger flush
      af.setDiskSynchronous(false);
      af.setEvictionAttributes(
          EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      af.setPartitionAttributes(paf.create());
      if (diskStoreName != null) {
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName(diskStoreName);
      } else {
        af.setDataPolicy(DataPolicy.PARTITION);
      }
      cache.createRegion(regionName, af.create());
      if (recoveryDone != null) {
        try {
          recoveryDone.await();
        } catch (InterruptedException e) {
          Assert.fail("Interrupted", e);
        }
      }
    }
  };
}
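Every example on this page follows the same recipe: install a ResourceObserverAdapter whose recoveryFinished callback counts down a latch, create the partitioned region, then await the latch so the caller only proceeds once redundancy recovery has completed. A minimal sketch of just that recipe outside any DUnit fixture follows; the class and helper names, the PARTITION_REDUNDANT shortcut, and the 60-second bound are illustrative choices rather than anything taken from these tests, while the reset to null mirrors the testGrabBackupBuckets example further down.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.internal.cache.control.InternalResourceManager;
import org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver;

public final class RecoveryWait {

  // Hypothetical helper: create a redundant PR and block until recovery finishes.
  public static Region<?, ?> createPRAndAwaitRecovery(Cache cache, String regionName)
      throws InterruptedException {
    final CountDownLatch recoveryDone = new CountDownLatch(1);
    ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
      @Override
      public void recoveryFinished(Region region) {
        recoveryDone.countDown();
      }
    };
    // Install the observer before creating the region, or recoveryFinished
    // can fire before anyone is listening.
    InternalResourceManager.setResourceObserver(observer);
    try {
      Region<?, ?> region =
          cache.createRegionFactory(RegionShortcut.PARTITION_REDUNDANT).create(regionName);
      // Bound the wait so a missed callback fails fast instead of hanging the test.
      if (!recoveryDone.await(60, TimeUnit.SECONDS)) {
        throw new AssertionError("redundancy recovery did not finish within 60 seconds");
      }
      return region;
    } finally {
      // Restore the default observer so later tests are unaffected.
      InternalResourceManager.setResourceObserver(null);
    }
  }
}

Because setResourceObserver installs a single process-wide observer, resetting it in a finally block keeps one test's observer from leaking into the next; several of the examples on this page skip that step.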
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver in project geode by apache.
In class MyGatewayEventSubstitutionFilter, the method createPRWithRedundantCopyWithAsyncEventQueue:
/**
 * Create a PartitionedRegion with 1 redundant copy and an attached async event queue.
 */
public static void createPRWithRedundantCopyWithAsyncEventQueue(String regionName,
    String asyncEventQueueId, Boolean offHeap) throws InterruptedException {
  // ForceReattemptException can surface while buckets are being created; ignore it in the logs.
  IgnoredException exp =
      IgnoredException.addIgnoredException(ForceReattemptException.class.getName());
  CountDownLatch recoveryDone = new CountDownLatch(2);
  ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
    @Override
    public void recoveryFinished(Region region) {
      recoveryDone.countDown();
    }
  };
  InternalResourceManager.setResourceObserver(observer);
  try {
    AttributesFactory fact = new AttributesFactory();
    PartitionAttributesFactory pfact = new PartitionAttributesFactory();
    pfact.setTotalNumBuckets(16);
    pfact.setRedundantCopies(1);
    fact.setPartitionAttributes(pfact.create());
    fact.setOffHeap(offHeap);
    Region r = cache.createRegionFactory(fact.create()).addAsyncEventQueueId(asyncEventQueueId)
        .create(regionName);
    assertNotNull(r);
    recoveryDone.await();
  } finally {
    exp.remove();
  }
}
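This method assumes the AsyncEventQueue named by asyncEventQueueId already exists on the cache. A rough sketch of that setup, assuming a no-op placeholder listener and an illustrative queue id (neither is taken from the test):

import java.util.List;

import org.apache.geode.cache.asyncqueue.AsyncEvent;
import org.apache.geode.cache.asyncqueue.AsyncEventListener;
import org.apache.geode.cache.asyncqueue.AsyncEventQueue;

AsyncEventListener listener = new AsyncEventListener() {
  @Override
  public boolean processEvents(List<AsyncEvent> events) {
    return true; // placeholder: acknowledge every batch without doing any work
  }

  @Override
  public void close() {}
};
AsyncEventQueue queue =
    cache.createAsyncEventQueueFactory().create("myAsyncQueueId", listener);

The region creation above then wires the queue in via addAsyncEventQueueId(asyncEventQueueId).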
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver in project geode by apache.
In class PersistentColocatedPartitionedRegionDUnitTest, the method getCreateChildPRRunnable:
private SerializableRunnable getCreateChildPRRunnable() {
  return new SerializableRunnable("createChildPR") {
    public void run() {
      Cache cache = getCache();
      final CountDownLatch recoveryDone = new CountDownLatch(1);
      ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
        @Override
        public void recoveryFinished(Region region) {
          // Only recovery of the colocated child region ("region2") matters here.
          if (region.getName().equals("region2")) {
            recoveryDone.countDown();
          }
        }
      };
      InternalResourceManager.setResourceObserver(observer);
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setColocatedWith(PR_REGION_NAME);
      af.setPartitionAttributes(paf.create());
      cache.createRegion("region2", af.create());
      try {
        recoveryDone.await(MAX_WAIT, TimeUnit.MILLISECONDS);
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      }
    }
  };
}
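Note that setColocatedWith(PR_REGION_NAME) requires the parent partitioned region to already exist in the same cache with compatible partition attributes (matching redundancy and bucket count); the next example shows such a parent being created before the colocated child.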
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver in project geode by apache.
In class PersistentColocatedPartitionedRegionDUnitTest, the method testReplaceOfflineMemberAndRestartCreateColocatedPRLate:
@Test
public void testReplaceOfflineMemberAndRestartCreateColocatedPRLate() throws Throwable {
  SerializableRunnable createParentPR = new SerializableRunnable() {
    public void run() {
      Cache cache = getCache();
      DiskStore ds = cache.findDiskStore("disk");
      if (ds == null) {
        ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
      }
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setRecoveryDelay(0);
      af.setPartitionAttributes(paf.create());
      af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
      af.setDiskStoreName("disk");
      cache.createRegion(PR_REGION_NAME, af.create());
    }
  };
  SerializableRunnable createChildPR = new SerializableRunnable() {
    public void run() {
      Cache cache = getCache();
      final CountDownLatch recoveryDone = new CountDownLatch(1);
      ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
        @Override
        public void recoveryFinished(Region region) {
          if (region.getName().contains("region2")) {
            recoveryDone.countDown();
          }
        }
      };
      InternalResourceManager.setResourceObserver(observer);
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setRecoveryDelay(0);
      paf.setColocatedWith(PR_REGION_NAME);
      af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
      af.setDiskStoreName("disk");
      af.setPartitionAttributes(paf.create());
      cache.createRegion("region2", af.create());
      try {
        if (!recoveryDone.await(MAX_WAIT, TimeUnit.MILLISECONDS)) {
          fail("timed out");
        }
      } catch (InterruptedException e) {
        Assert.fail("interrupted", e);
      }
    }
  };
  replaceOfflineMemberAndRestartCreateColocatedPRLate(createParentPR, createChildPR);
}
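Both regions in this test set setRecoveryDelay(0), which makes redundancy recovery begin immediately when a member departs; the default recovery delay of -1 disables departure-triggered recovery, in which case the recoveryFinished callback this test blocks on would never fire for the replaced member.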
Use of org.apache.geode.internal.cache.control.InternalResourceManager.ResourceObserver in project geode by apache.
In class PartitionedRegionHADUnitTest, the method testGrabBackupBuckets:
////////// test methods ////////////////
@Test
public void testGrabBackupBuckets() throws Throwable {
  Host host = Host.getHost(0);
  VM dataStore0 = host.getVM(0);
  // VM dataStore1 = host.getVM(1);
  VM dataStore2 = host.getVM(2);
  VM accessor = host.getVM(3);
  final int redundantCopies = 1;
  // Create PRs on 2 VMs
  CacheSerializableRunnable createPRs = new CacheSerializableRunnable("createPrRegions") {
    public void run2() throws CacheException {
      final CountDownLatch recoveryDone = new CountDownLatch(MAX_REGIONS);
      ResourceObserver waitForRecovery = new ResourceObserverAdapter() {
        @Override
        public void rebalancingOrRecoveryFinished(Region region) {
          recoveryDone.countDown();
        }
      };
      InternalResourceManager.setResourceObserver(waitForRecovery);
      try {
        Cache cache = getCache();
        System.setProperty(PartitionedRegion.RETRY_TIMEOUT_PROPERTY, "20000");
        for (int i = 0; i < MAX_REGIONS; i++) {
          cache.createRegion(PR_PREFIX + i, createRegionAttributesForPR(redundantCopies, 200));
        }
        System.setProperty(PartitionedRegion.RETRY_TIMEOUT_PROPERTY,
            Integer.toString(PartitionedRegionHelper.DEFAULT_TOTAL_WAIT_RETRY_ITERATION));
        if (!recoveryDone.await(60, TimeUnit.SECONDS)) {
          fail("recovery didn't happen in 60 seconds");
        }
      } catch (InterruptedException e) {
        Assert.fail("recovery wait interrupted", e);
      } finally {
        // Restore the default observer so other tests are not affected.
        InternalResourceManager.setResourceObserver(null);
      }
    }
  };
  CacheSerializableRunnable createAccessor = new CacheSerializableRunnable("createAccessor") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      // A localMaxMemory of 0 makes this member an accessor that hosts no buckets.
      for (int i = 0; i < MAX_REGIONS; i++) {
        cache.createRegion(PR_PREFIX + i, createRegionAttributesForPR(redundantCopies, 0));
      }
    }
  };
  // Create PRs on only 2 VMs
  dataStore0.invoke(createPRs);
  // dataStore1.invoke(createPRs);
  final String expectedExceptions = PartitionedRegionStorageException.class.getName();
  SerializableRunnable addExpectedExceptions =
      new CacheSerializableRunnable("addExpectedExceptions") {
        public void run2() throws CacheException {
          getCache().getLogger().info(
              "<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
          LogWriterUtils.getLogWriter().info(
              "<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
        }
      };
  SerializableRunnable removeExpectedExceptions =
      new CacheSerializableRunnable("removeExpectedExceptions") {
        public void run2() throws CacheException {
          LogWriterUtils.getLogWriter().info(
              "<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
          getCache().getLogger().info(
              "<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
        }
      };
  // Do put operations on these 2 PRs asynchronously.
  CacheSerializableRunnable dataStore0Puts =
      new CacheSerializableRunnable("dataStore0PutOperations") {
        public void run2() {
          Cache cache = getCache();
          for (int j = 0; j < MAX_REGIONS; j++) {
            Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + j);
            assertNotNull(pr);
            for (int k = 0; k < 10; k++) {
              pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
            }
            LogWriterUtils.getLogWriter()
                .info("VM0 Done put successfully for PR = " + PR_PREFIX + j);
          }
        }
      };
  // TODO bug36296
  CacheSerializableRunnable dataStore1Puts =
      new CacheSerializableRunnable("dataStore1PutOperations") {
        public void run2() {
          Cache cache = getCache();
          for (int j = 0; j < MAX_REGIONS; j++) {
            Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + j);
            assertNotNull(pr);
            for (int k = 10; k < 20; k++) {
              pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
            }
            LogWriterUtils.getLogWriter()
                .info("VM1 Done put successfully for PR = " + PR_PREFIX + j);
          }
        }
      };
  dataStore0.invoke(addExpectedExceptions);
  // dataStore1.invoke(addExpectedExceptions);
  AsyncInvocation async0 = dataStore0.invokeAsync(dataStore0Puts);
  // AsyncInvocation async1 = dataStore1.invokeAsync(dataStore1Puts);
  ThreadUtils.join(async0, 30 * 1000);
  // async1.join();
  dataStore0.invoke(removeExpectedExceptions);
  // dataStore1.invoke(removeExpectedExceptions);
  // Verify that buckets cannot be created if there are not enough nodes to support
  // the redundancy configuration.
  assertFalse(async0.exceptionOccurred());
  // assertTrue(async0.getException() instanceof PartitionedRegionStorageException);
  // assertTrue(async1.exceptionOccurred());
  // assertTrue(async1.getException() instanceof PartitionedRegionStorageException);
  // At this point the redundancy criterion is not met.
  // Now if we create PRs on more VMs, it should create those "supposed to
  // be redundant" buckets on these nodes, if it can accommodate the data
  // (localMaxMemory > 0).
  dataStore2.invoke(createPRs);
  async0 = dataStore0.invokeAsync(dataStore0Puts);
  // async1 = dataStore1.invokeAsync(dataStore1Puts);
  ThreadUtils.join(async0, 30 * 1000);
  if (async0.exceptionOccurred()) {
    Assert.fail("async0 failed", async0.getException());
  }
  // assertFalse(async1.exceptionOccurred());
  accessor.invoke(createAccessor);
  for (int c = 0; c < MAX_REGIONS; c++) {
    final Integer ri = new Integer(c);
    final SerializableCallable validateLocalBucket2RegionMapSize =
        new SerializableCallable("validateLocalBucket2RegionMapSize") {
          public Object call() throws Exception {
            int size = 0;
            Cache cache = getCache();
            PartitionedRegion pr =
                (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + ri.intValue());
            if (pr.getDataStore() != null) {
              size = pr.getDataStore().getBucketsManaged();
            }
            return new Integer(size);
          }
        };
    final SerializableCallable validateBucketsOnNode =
        new SerializableCallable("validateBucketOnNode") {
          public Object call() throws Exception {
            int containsNode = 0;
            Cache cache = getCache();
            PartitionedRegion pr =
                (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + ri.intValue());
            Iterator it = pr.getRegionAdvisor().getBucketSet().iterator();
            Set nodeList;
            try {
              while (it.hasNext()) {
                Integer bucketId = (Integer) it.next();
                nodeList = pr.getRegionAdvisor().getBucketOwners(bucketId.intValue());
                if ((nodeList != null) && (nodeList.contains(pr.getMyId()))) {
                  containsNode++;
                } else {
                  getCache().getLogger().fine("I don't contain member " + pr.getMyId());
                }
              }
            } catch (NoSuchElementException done) {
              // The bucket set iterator signals exhaustion this way; nothing more to do.
            }
            return new Integer(containsNode);
          }
        };
    // int vm0LBRsize =
    //     ((Integer) dataStore0.invoke(validateLocalBucket2RegionMapSize)).intValue();
    int vm2LBRsize = ((Integer) dataStore2.invoke(validateLocalBucket2RegionMapSize)).intValue();
    int vm3LBRsize = ((Integer) accessor.invoke(validateLocalBucket2RegionMapSize)).intValue();
    // A size of 0 would mean that the newly added node didn't pick up any buckets.
    assertFalse(vm2LBRsize == 0);
    // The accessor should NOT have picked up any buckets.
    assertFalse(vm3LBRsize != 0);
    int vm2B2Nsize = ((Integer) dataStore2.invoke(validateBucketsOnNode)).intValue();
    LogWriterUtils.getLogWriter().info("vm2B2Nsize = " + vm2B2Nsize);
    assertEquals(vm2B2Nsize, vm2LBRsize);
  }
}
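The helper createRegionAttributesForPR is referenced above but not shown on this page. Judging from the call sites, where data stores pass 200 and the accessor passes 0, the second argument is presumably localMaxMemory; a hypothetical reconstruction, not the test's actual code:

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.RegionAttributes;

// Hypothetical reconstruction; the real test class defines its own version.
private static RegionAttributes createRegionAttributesForPR(int redundancy, int localMaxMemory) {
  AttributesFactory af = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundancy);
  // A localMaxMemory of 0 makes the member a pure accessor that hosts no buckets.
  paf.setLocalMaxMemory(localMaxMemory);
  af.setPartitionAttributes(paf.create());
  return af.create();
}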