Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
In the class ParallelGatewaySenderQueueJUnitTest, the method mockBucketRegionQueue:
private BucketRegionQueue mockBucketRegionQueue(final Queue backingList) {
PartitionedRegion mockBucketRegion = mockPR("bucketRegion");
// The next two stubbed return values are consulted when peek is called; peek ends up
// checking them on the mocked PartitionedRegion
when(mockBucketRegion.getLocalMaxMemory()).thenReturn(100);
when(mockBucketRegion.size()).thenReturn(backingList.size());
BucketRegionQueue bucketRegionQueue = mock(BucketRegionQueue.class);
when(bucketRegionQueue.getPartitionedRegion()).thenReturn(mockBucketRegion);
when(bucketRegionQueue.peek()).thenAnswer((Answer) invocation -> backingList.poll());
return bucketRegionQueue;
}
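A minimal usage sketch (not part of the original test) showing how the mocked BucketRegionQueue behaves: each call to peek drains one element from the backing queue supplied to the helper. The queue contents and assertions below are illustrative only.
// Hypothetical usage sketch of the helper above.
Queue<Object> backingList = new LinkedList<>();
backingList.add("event1");
BucketRegionQueue queue = mockBucketRegionQueue(backingList);
// peek is stubbed to poll the backing list, so the first call returns the queued element ...
assertEquals("event1", queue.peek());
// ... and the second call finds the backing list empty
assertNull(queue.peek());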
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
In the class ParallelGatewaySenderQueueJUnitTest, the method mockPR:
private PartitionedRegion mockPR(String name) {
PartitionedRegion region = mock(PartitionedRegion.class);
when(region.getFullPath()).thenReturn(name);
when(region.getPartitionAttributes()).thenReturn(new PartitionAttributesFactory<>().create());
when(region.getTotalNumberOfBuckets()).thenReturn(113);
when(region.getDataPolicy()).thenReturn(DataPolicy.PARTITION);
return region;
}
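A small sketch, assuming only the mock defined above, of the values a caller can rely on; the region name and assertions are illustrative and not taken from the original test.
// Hypothetical sketch of what the mocked PartitionedRegion reports.
PartitionedRegion mockRegion = mockPR("testRegion");
// getFullPath is stubbed to return the name passed to the helper
assertEquals("testRegion", mockRegion.getFullPath());
// the mock reports the bucket count and data policy fixed by the helper
assertEquals(113, mockRegion.getTotalNumberOfBuckets());
assertEquals(DataPolicy.PARTITION, mockRegion.getDataPolicy());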
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
In the class MyGatewayEventSubstitutionFilter, the method createPRWithRedundantCopyWithAsyncEventQueue:
/**
* Create a PartitionedRegion with one redundant copy, attach the given AsyncEventQueue, and wait
* for redundancy recovery to finish.
*/
public static void createPRWithRedundantCopyWithAsyncEventQueue(String regionName, String asyncEventQueueId, Boolean offHeap) throws InterruptedException {
IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class.getName());
CountDownLatch recoveryDone = new CountDownLatch(2);
ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
@Override
public void recoveryFinished(Region region) {
recoveryDone.countDown();
}
};
InternalResourceManager.setResourceObserver(observer);
try {
AttributesFactory fact = new AttributesFactory();
PartitionAttributesFactory pfact = new PartitionAttributesFactory();
pfact.setTotalNumBuckets(16);
pfact.setRedundantCopies(1);
fact.setPartitionAttributes(pfact.create());
fact.setOffHeap(offHeap);
Region r = cache.createRegionFactory(fact.create()).addAsyncEventQueueId(asyncEventQueueId).create(regionName);
assertNotNull(r);
recoveryDone.await();
} finally {
exp.remove();
}
}
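A hedged sketch of how a DUnit test might drive this helper: the async event queue has to exist in the member before the region references its id. The queue id, listener class, and vm variable below are hypothetical and not part of the original class.
// Hypothetical invocation sketch: create the queue first, then the region that uses it.
vm0.invoke(() -> cache.createAsyncEventQueueFactory().setParallel(true).create("aeq1", new MyAsyncEventListener()));
vm0.invoke(() -> createPRWithRedundantCopyWithAsyncEventQueue("prRegion", "aeq1", false));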
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
In the class DiskStoreCommandsDUnitTest, the method testMissingDiskStoreCommandWithColocation:
@Test
public void testMissingDiskStoreCommandWithColocation() {
final String regionName = "testShowPersistentRecoveryFailuresRegion";
final String childName = "childRegion";
setUpJmxManagerOnVm0ThenConnect(null);
final VM vm0 = Host.getHost(0).getVM(0);
final VM vm1 = Host.getHost(0).getVM(1);
final String vm1Name = "VM" + vm1.getPid();
final String diskStoreName = "DiskStoreCommandsDUnitTest";
// Default setup creates a cache in the Manager, now create a cache in VM1
vm1.invoke(new SerializableRunnable() {
public void run() {
Properties localProps = new Properties();
localProps.setProperty(NAME, vm1Name);
getSystem(localProps);
Cache cache = getCache();
}
});
// Create a disk store and region in the Manager (VM0) and VM1 VMs
for (final VM vm : (new VM[] { vm0, vm1 })) {
final String vmName = "VM" + vm.getPid();
vm.invoke(new SerializableRunnable() {
public void run() {
Cache cache = getCache();
File diskStoreDirFile = new File(diskStoreName + vm.getPid());
diskStoreDirFile.mkdirs();
DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
diskStoreFactory.setDiskDirs(new File[] { diskStoreDirFile });
diskStoreFactory.setMaxOplogSize(1);
diskStoreFactory.setAllowForceCompaction(true);
diskStoreFactory.setAutoCompact(false);
diskStoreFactory.create(regionName);
diskStoreFactory.create(childName);
RegionFactory regionFactory = cache.createRegionFactory();
regionFactory.setDiskStoreName(regionName);
regionFactory.setDiskSynchronous(true);
regionFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
regionFactory.create(regionName);
PartitionAttributes pa = new PartitionAttributesFactory().setColocatedWith(regionName).create();
RegionFactory childRegionFactory = cache.createRegionFactory();
childRegionFactory.setPartitionAttributes(pa);
childRegionFactory.setDiskStoreName(childName);
childRegionFactory.setDiskSynchronous(true);
childRegionFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
childRegionFactory.create(childName);
}
});
}
// Add data to the region
vm0.invoke(new SerializableRunnable() {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion(regionName);
region.put("A", "a");
region.put("B", "b");
}
});
// Make sure that everything thus far is okay and there are no missing disk stores
CommandResult cmdResult = executeCommand(CliStrings.SHOW_MISSING_DISK_STORE);
System.out.println("command result=\n" + commandResultToString(cmdResult));
assertEquals(Result.Status.OK, cmdResult.getStatus());
assertTrue(cmdResult.toString(), commandResultToString(cmdResult).contains("No missing disk store found"));
// Close the regions in the Manager (VM0) VM
vm0.invoke(new SerializableRunnable() {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion(childName);
region.close();
region = cache.getRegion(regionName);
region.close();
}
});
// Add data to VM1 and then close the region
vm1.invoke(new SerializableRunnable() {
public void run() {
Cache cache = getCache();
Region childRegion = cache.getRegion(childName);
PartitionedRegion parentRegion = (PartitionedRegion) (cache.getRegion(regionName));
try {
parentRegion.put("A", "C");
} catch (Exception e) {
// Ignore any exception on the put
}
childRegion.close();
parentRegion.close();
}
});
SerializableRunnable restartParentRegion = new SerializableRunnable("Restart parent region") {
public void run() {
Cache cache = getCache();
RegionFactory regionFactory = cache.createRegionFactory();
regionFactory.setDiskStoreName(regionName);
regionFactory.setDiskSynchronous(true);
regionFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
try {
regionFactory.create(regionName);
} catch (Exception e) {
// okay to ignore
}
}
};
SerializableRunnable restartChildRegion = new SerializableRunnable("Restart child region") {
public void run() {
Cache cache = getCache();
PartitionAttributes pa = new PartitionAttributesFactory().setColocatedWith(regionName).create();
RegionFactory regionFactory = cache.createRegionFactory();
regionFactory.setPartitionAttributes(pa);
regionFactory.setDiskStoreName(childName);
regionFactory.setDiskSynchronous(true);
regionFactory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
try {
regionFactory.create(childName);
} catch (Exception e) {
// okay to ignore
e.printStackTrace();
}
}
};
// Add the region back to the Manager (VM0) VM
AsyncInvocation async0 = vm0.invokeAsync(restartParentRegion);
AsyncInvocation async1 = vm1.invokeAsync(restartParentRegion);
// Wait for the region in the Manager (VM0) to come online
vm0.invoke(new SerializableRunnable("WaitForRegionInVm0") {
public void run() {
WaitCriterion waitCriterion = new WaitCriterion() {
public boolean done() {
Cache cache = getCache();
PersistentMemberManager memberManager = ((GemFireCacheImpl) cache).getPersistentMemberManager();
return !memberManager.getWaitingRegions().isEmpty();
}
public String description() {
return "Waiting for another persistent member to come online";
}
};
try {
waitForCriterion(waitCriterion, 5000, 100, true);
} catch (AssertionError ae) {
// Ignore. waitForCriterion is expected to time out in this test
}
}
});
// Validate that there is a missing disk store on VM1
try {
cmdResult = executeCommand(CliStrings.SHOW_MISSING_DISK_STORE);
assertNotNull("Expect command result != null", cmdResult);
assertEquals(Result.Status.OK, cmdResult.getStatus());
String stringResult = commandResultToString(cmdResult);
System.out.println("command result=\n" + stringResult);
// Expect 2 result sections with header lines and 4 information lines in the first section
assertEquals(6, countLinesInString(stringResult, false));
assertTrue(stringContainsLine(stringResult, "Host.*Distributed Member.*Parent Region.*Missing Colocated Region"));
assertTrue(stringContainsLine(stringResult, ".*" + regionName + ".*" + childName));
AsyncInvocation async0b = vm0.invokeAsync(restartChildRegion);
try {
async0b.get(5000, TimeUnit.MILLISECONDS);
} catch (Exception e) {
// Expected timeout - Region recovery is still waiting on vm1 child region and disk-store to
// come online
}
cmdResult = executeCommand(CliStrings.SHOW_MISSING_DISK_STORE);
assertNotNull("Expect command result != null", cmdResult);
assertEquals(Result.Status.OK, cmdResult.getStatus());
stringResult = commandResultToString(cmdResult);
System.out.println("command result=\n" + stringResult);
// Extract the id from the returned missing disk store
String line = getLineFromString(stringResult, 4);
assertFalse(line.contains("---------"));
StringTokenizer resultTokenizer = new StringTokenizer(line);
String id = resultTokenizer.nextToken();
AsyncInvocation async1b = vm1.invokeAsync(restartChildRegion);
try {
async1b.get(5000, TimeUnit.MILLISECONDS);
} catch (Exception e) {
e.printStackTrace();
}
cmdResult = executeCommand(CliStrings.SHOW_MISSING_DISK_STORE);
assertNotNull("Expect command result != null", cmdResult);
assertEquals(Result.Status.OK, cmdResult.getStatus());
stringResult = commandResultToString(cmdResult);
System.out.println("command result=\n" + stringResult);
} finally {
// Verify that the invokeAsync thread terminated
try {
async0.get(10000, TimeUnit.MILLISECONDS);
async1.get(10000, TimeUnit.MILLISECONDS);
} catch (Exception e) {
fail("Unexpected timeout waitiong for invokeAsync threads to terminate: " + e.getMessage());
}
}
// Do our own cleanup so that the disk store directories can be removed
super.destroyDefaultSetup();
for (final VM vm : (new VM[] { vm0, vm1 })) {
final String vmName = "VM" + vm.getPid();
vm.invoke(new SerializableRunnable() {
public void run() {
try {
FileUtils.deleteDirectory((new File(diskStoreName + vm.getPid())));
} catch (IOException iex) {
// There's nothing else we can do
}
}
});
}
}
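The missing disk store id extracted above (the token taken from line 4 of the command output) is not used further in this snippet; in practice such an id is what gets passed to the revoke command. A hedged sketch, assuming the CliStrings.REVOKE_MISSING_DISK_STORE constant and its --id option:
// Hypothetical follow-up, not part of this test: revoke the missing disk store by id.
CommandResult revokeResult = executeCommand(CliStrings.REVOKE_MISSING_DISK_STORE + " --id=" + id);
assertEquals(Result.Status.OK, revokeResult.getStatus());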
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
In the class PartitionedRegionAPIDUnitTest, the method testCacherLoaderHelper:
@Test
public void testCacherLoaderHelper() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
VM vm2 = host.getVM(2);
VM vm3 = host.getVM(3);
final int localMaxMemory = 10;
final String key1 = "key1";
final String arg = "loaderArg";
CacheSerializableRunnable createLoaderPR = new CacheSerializableRunnable("createLoaderPR") {
public void run2() throws CacheException {
getCache();
CacheLoader cl = new TestCacheLoader() {
public Object load2(LoaderHelper helper) throws CacheLoaderException {
assertNotNull(helper);
assertEquals(key1, helper.getKey());
assertEquals(rName, helper.getRegion().getName());
assertEquals(arg, helper.getArgument());
return helper.getArgument();
}
};
PartitionedRegion pr = (PartitionedRegion) new RegionFactory().setCacheLoader(cl).setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(1).setLocalMaxMemory(localMaxMemory).create()).create(rName);
assertSame(cl, pr.getDataStore().getCacheLoader());
}
};
vm2.invoke(createLoaderPR);
vm3.invoke(createLoaderPR);
// create a "pure" accessor, no data storage
getCache();
Region pr = new RegionFactory().setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(1).setLocalMaxMemory(0).create()).create(rName);
assertEquals(arg, pr.get(key1, arg));
}
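The final assertion works because Region.get(key, callbackArgument) passes the second argument through to the loader, where it surfaces as LoaderHelper.getArgument(). A minimal standalone sketch of that flow, assuming a Cache reference named cache; the region name and loader are hypothetical, not the test's own setup.
// Hypothetical sketch: a loader on a partitioned region echoes the callback argument on a miss.
Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
    .setCacheLoader(new CacheLoader<String, String>() {
        public String load(LoaderHelper<String, String> helper) {
            // the callback argument supplied to get() arrives here
            return "loaded-" + helper.getArgument();
        }
        public void close() {
        }
    })
    .create("exampleRegion");
assertEquals("loaded-arg", region.get("missingKey", "arg"));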