use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class RebalanceOperationDUnitTest method testRecoverRedundancyBalancingIfCreateBucketFails.
@Test
public void testRecoverRedundancyBalancingIfCreateBucketFails() {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
final DistributedMember member1 = createPrRegion(vm0, "region1", 100, null);
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
for (int i = 0; i < 1; i++) {
region.put(Integer.valueOf(i), "A");
}
}
});
SerializableRunnable checkRedundancy = new SerializableRunnable("checkRedundancy") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(1, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(1, details.getLowRedundancyBucketCount());
}
};
vm0.invoke(checkRedundancy);
// Now create the region in 2 more VMs
// Let localMaxMemory(VM1) > localMaxMemory(VM2)
// so that the redundant bucket will always be attempted on VM1 first
final DistributedMember member2 = createPrRegion(vm1, "region1", 100, null);
final DistributedMember member3 = createPrRegion(vm2, "region1", 90, null);
vm0.invoke(checkRedundancy);
// Inject a spied PRHARedundancyProvider to simulate create-bucket failures
vm0.invoke(new SerializableRunnable("injectCreateBucketFailureAndRebalance") {
@Override
public void run() {
GemFireCacheImpl cache = spy(getGemfireCache());
// set the spied cache instance
GemFireCacheImpl origCache = GemFireCacheImpl.setInstanceForTests(cache);
PartitionedRegion origRegion = (PartitionedRegion) cache.getRegion("region1");
PartitionedRegion spyRegion = spy(origRegion);
PRHARedundancyProvider redundancyProvider = spy(new PRHARedundancyProvider(spyRegion));
// return the spied region whenever getPartitionedRegions() is invoked
Set<PartitionedRegion> parRegions = cache.getPartitionedRegions();
parRegions.remove(origRegion);
parRegions.add(spyRegion);
doReturn(parRegions).when(cache).getPartitionedRegions();
doReturn(redundancyProvider).when(spyRegion).getRedundancyProvider();
// simulate a create-bucket failure on member2 and verify the bucket is created on member3 instead
doReturn(false).when(redundancyProvider).createBackupBucketOnMember(anyInt(), eq((InternalDistributedMember) member2), anyBoolean(), anyBoolean(), any(), anyBoolean());
// Now trigger a rebalance
// Use RebalanceOperationImpl directly rather than the factory so the spied cache is passed to the operation
RegionFilter filter = new FilterByPath(null, null);
RebalanceOperationImpl operation = new RebalanceOperationImpl(cache, false, filter);
operation.start();
RebalanceResults results = null;
try {
results = operation.getResults(MAX_WAIT, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Assert.fail("Interrupted waiting on rebalance", e);
} catch (TimeoutException e) {
Assert.fail("Timeout waiting on rebalance", e);
}
assertEquals(1, results.getTotalBucketCreatesCompleted());
assertEquals(0, results.getTotalPrimaryTransfersCompleted());
assertEquals(0, results.getTotalBucketTransferBytes());
assertEquals(0, results.getTotalBucketTransfersCompleted());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(1, detailSet.size());
PartitionRebalanceInfo details = detailSet.iterator().next();
assertEquals(1, details.getBucketCreatesCompleted());
assertEquals(0, details.getPrimaryTransfersCompleted());
assertEquals(0, details.getBucketTransferBytes());
assertEquals(0, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(3, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
if (memberDetails.getDistributedMember().equals(member1)) {
assertEquals(1, memberDetails.getBucketCount());
assertEquals(1, memberDetails.getPrimaryCount());
} else if (memberDetails.getDistributedMember().equals(member2)) {
assertEquals(0, memberDetails.getBucketCount());
assertEquals(0, memberDetails.getPrimaryCount());
} else if (memberDetails.getDistributedMember().equals(member3)) {
assertEquals(1, memberDetails.getBucketCount());
assertEquals(0, memberDetails.getPrimaryCount());
}
}
ResourceManagerStats stats = cache.getInternalResourceManager().getStats();
assertEquals(0, stats.getRebalancesInProgress());
assertEquals(1, stats.getRebalancesCompleted());
assertEquals(0, stats.getRebalanceBucketCreatesInProgress());
assertEquals(results.getTotalBucketCreatesCompleted(), stats.getRebalanceBucketCreatesCompleted());
assertEquals(1, stats.getRebalanceBucketCreatesFailed());
// restore the original cache instance
GemFireCacheImpl.setInstanceForTests(origCache);
}
});
SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkLowRedundancy") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(1, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
}
};
vm0.invoke(checkRedundancyFixed);
vm1.invoke(checkRedundancyFixed);
vm2.invoke(checkRedundancyFixed);
}
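The createPrRegion helper called above (and throughout these examples) is not shown on this page. As a rough, hedged sketch of what such a helper might look like, assuming the (vm, regionName, localMaxMemory, colocatedWith) signature implied by the call sites and a redundancy of 1; the real helper in the Geode test code may differ:
private DistributedMember createPrRegion(VM vm, final String regionName, final int localMaxMemory, final String colocatedWith) {
  return (DistributedMember) vm.invoke(new SerializableCallable("createPrRegion") {
    public Object call() throws Exception {
      Cache cache = getCache();
      // Partition attributes: one redundant copy; localMaxMemory weights the rebalancer.
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setLocalMaxMemory(localMaxMemory);
      if (colocatedWith != null) {
        // Colocate this region's buckets with the named leader region.
        paf.setColocatedWith(colocatedWith);
      }
      AttributesFactory af = new AttributesFactory();
      af.setPartitionAttributes(paf.create());
      cache.createRegion(regionName, af.create());
      // Return this member's id so the test can match it in the rebalance details.
      return cache.getDistributedSystem().getDistributedMember();
    }
  });
}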
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class RebalanceOperationDUnitTest method moveBucketsColocatedRegions.
/**
* Test to make sure that we move buckets to balance across three members with a redundancy of 1
* and two colocated regions. Makes sure that the buckets stay colocated when we move them.
*
* @param simulate if true, only simulate the rebalance rather than actually moving buckets
*/
public void moveBucketsColocatedRegions(final boolean simulate) {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
createPrRegion(vm0, "region1", 200, null);
createPrRegion(vm0, "region2", 200, "region1");
createPrRegion(vm1, "region1", 200, null);
createPrRegion(vm1, "region2", 200, "region1");
// Create some buckets.
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
Region region2 = cache.getRegion("region2");
for (int i = 0; i < 12; i++) {
region.put(Integer.valueOf(i), "A");
region2.put(Integer.valueOf(i), "A");
}
}
});
// create the leader region, but not the colocated
// region in one of the VMs.
createPrRegion(vm2, "region1", 200, null);
// Simulate a rebalance, and make sure we don't
// move any buckets yet, because we don't have
// the colocated region in the new VMs.
vm0.invoke(new SerializableRunnable("rebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(0, results.getTotalBucketCreatesCompleted());
assertEquals(0, results.getTotalPrimaryTransfersCompleted());
assertEquals(0, results.getTotalBucketTransferBytes());
assertEquals(0, results.getTotalBucketTransfersCompleted());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(2, detailSet.size());
for (PartitionRebalanceInfo details : detailSet) {
assertEquals(0, details.getBucketCreatesCompleted());
assertEquals(0, details.getPrimaryTransfersCompleted());
assertEquals(0, details.getBucketTransferBytes());
assertEquals(0, details.getBucketTransfersCompleted());
}
}
});
// now create the colocated region in the last VM.
createPrRegion(vm2, "region2", 200, "region1");
vm0.invoke(new SerializableRunnable("rebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(0, results.getTotalBucketCreatesCompleted());
assertEquals(16, results.getTotalBucketTransfersCompleted());
assertTrue(0 < results.getTotalBucketTransferBytes());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(2, detailSet.size());
for (PartitionRebalanceInfo details : detailSet) {
assertEquals(0, details.getBucketCreatesCompleted());
assertTrue(0 < details.getBucketTransferBytes());
assertEquals(8, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(3, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
assertEquals(8, memberDetails.getBucketCount());
assertEquals(4, memberDetails.getPrimaryCount());
}
}
}
});
if (!simulate) {
SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkLowRedundancy") {
public void run() {
Cache cache = getCache();
PartitionedRegion region1 = (PartitionedRegion) cache.getRegion("region1");
PartitionedRegion region2 = (PartitionedRegion) cache.getRegion("region2");
ResourceManager manager = cache.getResourceManager();
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(cache.getRegion("region1"));
assertEquals(12, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
details = PartitionRegionHelper.getPartitionRegionInfo(cache.getRegion("region2"));
assertEquals(12, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
assertEquals(region1.getLocalPrimaryBucketsListTestOnly(), region2.getLocalPrimaryBucketsListTestOnly());
assertEquals(region1.getLocalBucketsListTestOnly(), region2.getLocalBucketsListTestOnly());
}
};
vm0.invoke(checkRedundancyFixed);
vm1.invoke(checkRedundancyFixed);
vm2.invoke(checkRedundancyFixed);
}
}
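The doRebalance helper used in the rebalance runnables above is also not shown here. A minimal sketch, assuming it simply goes through the public ResourceManager/RebalanceFactory API and blocks for the results (MAX_WAIT is the test's timeout constant):
private RebalanceResults doRebalance(boolean simulate, ResourceManager manager) {
  // Either compute the moves without performing them, or actually rebalance.
  RebalanceOperation op = simulate
      ? manager.createRebalanceFactory().simulate()
      : manager.createRebalanceFactory().start();
  RebalanceResults results = null;
  try {
    results = op.getResults(MAX_WAIT, TimeUnit.SECONDS);
  } catch (InterruptedException | TimeoutException e) {
    Assert.fail("Failed waiting for rebalance", e);
  }
  assertTrue(op.isDone());
  return results;
}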
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class RebalanceOperationDUnitTest method recoverRedundancyParallelAsyncEventQueue.
public void recoverRedundancyParallelAsyncEventQueue(final boolean simulate) {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
final DistributedMember member1 = createPRRegionWithAsyncQueue(vm0, 200);
// Create some buckets. Put enough data to cause the queue to overflow (more than 1 MB)
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
Region region2 = cache.getRegion("region2");
for (int i = 0; i < 12; i++) {
region.put(Integer.valueOf(i), "A", new byte[1024 * 512]);
}
// GEODE-244 - the async event queue uses asynchronous writes. Flush
// the default disk store to make sure all values have overflowed
cache.findDiskStore(null).flush();
}
});
// check to make sure our redundancy is impaired
SerializableRunnable checkLowRedundancy = new SerializableRunnable("checkLowRedundancy") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(12, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(12, details.getLowRedundancyBucketCount());
// Get the async event queue region (It's a colocated region)
PartitionedRegion region2 = ColocationHelper.getColocatedChildRegions((PartitionedRegion) region).get(0);
details = PartitionRegionHelper.getPartitionRegionInfo(region2);
assertEquals(12, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(12, details.getLowRedundancyBucketCount());
AsyncEventQueue queue = cache.getAsyncEventQueue("parallelQueue");
assertEquals(12, queue.size());
}
};
vm0.invoke(checkLowRedundancy);
// Create the region on two more members, each with 1/2 of the memory
createPRRegionWithAsyncQueue(vm1, 100);
createPRRegionWithAsyncQueue(vm2, 100);
vm0.invoke(checkLowRedundancy);
// Now simulate a rebalance
vm0.invoke(new SerializableRunnable("rebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(24, results.getTotalBucketCreatesCompleted());
assertEquals(12, results.getTotalPrimaryTransfersCompleted());
assertEquals(0, results.getTotalBucketTransferBytes());
assertEquals(0, results.getTotalBucketTransfersCompleted());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(2, detailSet.size());
for (PartitionRebalanceInfo details : detailSet) {
assertEquals(12, details.getBucketCreatesCompleted());
assertEquals(6, details.getPrimaryTransfersCompleted());
assertEquals(0, details.getBucketTransferBytes());
assertEquals(0, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(3, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
if (memberDetails.getDistributedMember().equals(member1)) {
assertEquals(12, memberDetails.getBucketCount());
assertEquals(6, memberDetails.getPrimaryCount());
} else {
assertEquals(6, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
}
if (!simulate) {
verifyStats(manager, results);
}
}
}
});
if (!simulate) {
SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkLowRedundancy") {
public void run() {
Cache cache = getCache();
PartitionedRegion region1 = (PartitionedRegion) cache.getRegion("region1");
// Get the async event queue region (It's a colocated region)
PartitionedRegion region2 = ColocationHelper.getColocatedChildRegions(region1).get(0);
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(cache.getRegion("region1"));
assertEquals(12, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
details = PartitionRegionHelper.getPartitionRegionInfo(region2);
assertEquals(12, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
assertEquals(region1.getLocalPrimaryBucketsListTestOnly(), region2.getLocalPrimaryBucketsListTestOnly());
assertEquals(region1.getLocalBucketsListTestOnly(), region2.getLocalBucketsListTestOnly());
}
};
vm0.invoke(checkRedundancyFixed);
vm1.invoke(checkRedundancyFixed);
vm2.invoke(checkRedundancyFixed);
}
}
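The createPRRegionWithAsyncQueue helper is likewise not shown on this page. A hedged sketch of how a partitioned region with an attached parallel async event queue might be created; the queue name "parallelQueue" comes from the test above, while the no-op listener, the 1 MB queue memory limit, and the rest of the configuration are assumptions:
private DistributedMember createPRRegionWithAsyncQueue(VM vm, final int localMaxMemory) {
  return (DistributedMember) vm.invoke(new SerializableCallable("createPRRegionWithAsyncQueue") {
    public Object call() throws Exception {
      Cache cache = getCache();
      // A listener that discards events; the tests only check bucket placement and queue size.
      AsyncEventListener noopListener = new AsyncEventListener() {
        public boolean processEvents(List<AsyncEvent> events) {
          return true;
        }
        public void close() {}
      };
      // Parallel queue: one queue bucket per region bucket, colocated with the region.
      // A small memory limit forces the queued values to overflow to the default disk store.
      cache.createAsyncEventQueueFactory().setParallel(true).setMaxQueueMemory(1).create("parallelQueue", noopListener);
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setLocalMaxMemory(localMaxMemory);
      AttributesFactory af = new AttributesFactory();
      af.setPartitionAttributes(paf.create());
      af.addAsyncEventQueueId("parallelQueue");
      cache.createRegion("region1", af.create());
      return cache.getDistributedSystem().getDistributedMember();
    }
  });
}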
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class RebalanceOperationDUnitTest method waitForBucketList.
private void waitForBucketList(final String regionName, VM vm0, final Collection<Integer> expected) {
SerializableCallable getBuckets = new SerializableCallable("get buckets") {
public Object call() throws Exception {
Cache cache = getCache();
final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
Wait.waitForCriterion(new WaitCriterion() {
@Override
public boolean done() {
TreeSet<Integer> results = getBuckets();
return results.equals(expected);
}
protected TreeSet<Integer> getBuckets() {
TreeSet<Integer> results = new TreeSet<Integer>(region.getDataStore().getAllLocalBucketIds());
return results;
}
@Override
public String description() {
return "Timeout waiting for buckets to match. Expected " + expected + " but got " + getBuckets();
}
}, 60000, 100, true);
return null;
}
};
vm0.invoke(getBuckets);
}
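A caller of waitForBucketList passes the exact bucket ids it expects the member to host. Note that the expected collection should be a Set (for example a TreeSet), since done() compares it with Set.equals. A hypothetical usage with made-up bucket ids:
// Wait until vm1 hosts exactly buckets 0-5 of "region1" (the ids are illustrative only).
waitForBucketList("region1", vm1, new TreeSet<Integer>(Arrays.asList(0, 1, 2, 3, 4, 5)));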
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class PRFunctionExecutionDUnitTest method testRemoteSingleKeyExecution_byName_FunctionInvocationTargetException.
/**
* Test remote execution by a pure accessor which doesn't have the function factory present.
* The function throws a FunctionInvocationTargetException. Because the function is HA, the
* system should retry the execution. After the 5th attempt the function sends its last result.
*/
@Test
public void testRemoteSingleKeyExecution_byName_FunctionInvocationTargetException() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
final VM accessor = host.getVM(2);
final VM datastore = host.getVM(3);
getCache();
accessor.invoke(new SerializableCallable("Create PR") {
public Object call() throws Exception {
RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 0);
getCache().createRegion(rName, ra);
return Boolean.TRUE;
}
});
datastore.invoke(new SerializableCallable("Create PR with Function Factory") {
public Object call() throws Exception {
RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
AttributesFactory raf = new AttributesFactory(ra);
PartitionAttributesImpl pa = new PartitionAttributesImpl();
pa.setAll(ra.getPartitionAttributes());
raf.setPartitionAttributes(pa);
getCache().createRegion(rName, raf.create());
Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_REEXECUTE_EXCEPTION);
FunctionService.registerFunction(function);
return Boolean.TRUE;
}
});
accessor.invoke(new SerializableCallable("Create data, invoke exectuable") {
public Object call() throws Exception {
PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
final String testKey = "execKey";
final Set testKeysSet = new HashSet();
testKeysSet.add(testKey);
DistributedSystem.setThreadsSocketPolicy(false);
Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_REEXECUTE_EXCEPTION);
FunctionService.registerFunction(function);
Execution dataSet = FunctionService.onRegion(pr);
pr.put(testKey, new Integer(1));
try {
ResultCollector rs1 = dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
List list = (ArrayList) rs1.getResult();
assertEquals(list.get(0), 5);
} catch (Throwable e) {
e.printStackTrace();
Assert.fail("This is not expected Exception", e);
}
return Boolean.TRUE;
}
});
}
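The TestFunction with TEST_FUNCTION_REEXECUTE_EXCEPTION comes from Geode's test support code and is not shown on this page. As a hedged illustration of the retry behaviour described in the javadoc above, here is a sketch of what such an HA function might look like; the attempt counter and the value 5 are assumptions chosen to match the assertion on list.get(0):
public class ReexecuteExceptionFunction extends FunctionAdapter {
  // Per-JVM attempt counter; the datastore member sees every retried execution.
  private static final AtomicInteger attempts = new AtomicInteger();

  @Override
  public void execute(FunctionContext context) {
    int attempt = attempts.incrementAndGet();
    if (attempt < 5) {
      // Signals the function service that the target was lost; with isHA() == true
      // the execution is retried instead of being reported to the caller.
      throw new FunctionInvocationTargetException("simulated failure, attempt " + attempt);
    }
    // On the 5th attempt, return the attempt count as the single (last) result.
    context.getResultSender().lastResult(attempt);
  }

  @Override
  public String getId() {
    return getClass().getName();
  }

  @Override
  public boolean hasResult() {
    return true;
  }

  @Override
  public boolean isHA() {
    return true;
  }
}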