Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache:
the class PartitionedRegionAPIDUnitTest, method testBug36685.
/**
 * Verify that localMaxMemory ends up non-zero when a partitioned region is created from
 * nothing but a {@code DataPolicy.PARTITION} data policy (i.e. the default
 * PartitionAttributes are applied).
 */
@Test
public void testBug36685() throws Exception {
  final String regionName = getUniqueName();
  Host host = Host.getHost(0);
  vm0 = host.getVM(0);
  vm1 = host.getVM(1);
  vm2 = host.getVM(2);
  vm3 = host.getVM(3);
  CacheSerializableRunnable createRegion = new CacheSerializableRunnable("createPR") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      AttributesFactory factory = new AttributesFactory();
      factory.setDataPolicy(DataPolicy.PARTITION);
      RegionAttributes attrs = factory.create();
      Region region = cache.createRegion(regionName, attrs);
      assertNotNull(region);
      assertNotNull(cache.getRegion(regionName));
      // Default partition attributes must supply a non-zero localMaxMemory.
      PartitionAttributes partitionAttrs = attrs.getPartitionAttributes();
      assertTrue("LocalMaxMemory is zero", partitionAttrs.getLocalMaxMemory() != 0);
    }
  };
  vm0.invoke(createRegion);
  destroyTheRegion(regionName);
}
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class PartitionedRegionAPIDUnitTest method partitionedRegionTest.
/**
 * Exercises the core Region API (put, create, destroy, get, containsKey,
 * containsValueForKey) on the partitioned region {@code prName} from four VMs playing
 * different roles (accessor only vs. accessor + datastore), verifies the expected
 * EntryExistsException / EntryNotFoundException behavior, then destroys the region and
 * checks that all PR meta-data (config entry, bucket regions, prIdToPR entry) has been
 * cleaned up in every VM.
 *
 * <p>Fixes relative to the previous revision: several {@code assertEquals} calls had
 * their expected/actual arguments swapped; one exception-tag string used {@code ":"}
 * instead of the {@code "||"} separator used everywhere else; the key-set iterator
 * read-only check called {@code remove()} before {@code next()}, which throws
 * {@code IllegalStateException} on any iterator and therefore proved nothing.
 *
 * @param prName full path of the partitioned region under test
 */
public void partitionedRegionTest(final String prName) {
  /*
   * Do put(), create(), invalidate() operations through VM with PR having both Accessor and
   * Datastore
   */
  vm0.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations1") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      final Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail(prName + " not created");
      }
      // A freshly created PR must report itself as empty.
      int size = pr.size();
      assertEquals("Size doesnt return expected value", 0, size);
      assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true, pr.isEmpty());
      assertEquals(0, pr.keySet().size());
      for (int i = putRange_1Start; i <= putRange_1End; i++) {
        pr.put(Integer.toString(i), Integer.toString(i));
      }
      size = pr.size();
      assertEquals("Size doesn't return expected value", putRange_1End, size);
      assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", false, pr.isEmpty());
      // Positive assertion of functionality in a distributed env.
      // For basic functional support (or lack of), please see
      // PartitionedRegionSingleNodeOperationsJUnitTest
      assertEquals(putRange_1End, pr.keySet().size());
      Set ks = pr.keySet();
      Iterator ksI = ks.iterator();
      while (ksI.hasNext()) {
        // Advance first: calling remove() before next() throws IllegalStateException on
        // ANY iterator and would not demonstrate that this one is read only.
        Object key = ksI.next();
        assertEquals(String.class, key.getClass());
        Integer.parseInt((String) key);
        try {
          ksI.remove();
          fail("Expected key set iterator to be read only");
        } catch (Exception expected) {
        }
      }
      // remove() must also be rejected after the iterator is exhausted.
      try {
        ksI.remove();
        fail("Expected key set iterator to be read only");
      } catch (Exception expected) {
      }
      assertFalse(ksI.hasNext());
      try {
        ksI.next();
        fail("Expected no such element exception");
      } catch (NoSuchElementException expected) {
        assertFalse(ksI.hasNext());
      }
      // Destroy every entry; the remote VMs may observe EntryNotFoundException wrapped in
      // ReplyException, so tag both as expected in all members.
      String exceptionStr = ReplyException.class.getName() + "||" + EntryNotFoundException.class.getName();
      vm1.invoke(addExceptionTag1(exceptionStr));
      vm2.invoke(addExceptionTag1(exceptionStr));
      vm3.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = putRange_1Start; i <= putRange_1End; i++) {
        try {
          pr.destroy(Integer.toString(i));
        } catch (EntryNotFoundException enfe) {
          // Log where (if anywhere) the key still lives before re-throwing.
          searchForKey((PartitionedRegion) pr, Integer.toString(i));
          throw enfe;
        }
      }
      vm1.invoke(removeExceptionTag1(exceptionStr));
      vm2.invoke(removeExceptionTag1(exceptionStr));
      vm3.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      // After destroying everything the PR must be empty again.
      size = pr.size();
      assertEquals("Size doesnt return expected value = 0 instead it returns" + size, 0, size);
      assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true, pr.isEmpty());
      for (int i = putRange_1Start; i <= putRange_1End; i++) {
        pr.put(Integer.toString(i), Integer.toString(i));
      }
      // create() entries; odd keys get a null value (invalidate-style entries).
      for (int i = createRange_1Start; i <= createRange_1End; i++) {
        Object val = null;
        Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        pr.create(key, val);
      }
      // Re-creating existing entries must throw EntryExistsException.
      final String expectedExceptions = EntryExistsException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
      exceptionStr = ReplyException.class.getName() + "||" + expectedExceptions;
      vm1.invoke(addExceptionTag1(exceptionStr));
      vm2.invoke(addExceptionTag1(exceptionStr));
      vm3.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = createRange_1Start; i <= createRange_1End; i++) {
        Object val = null;
        Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        try {
          pr.create(key, val);
          fail("EntryExistsException is not thrown");
        } catch (EntryExistsException expected) {
        }
      }
      vm1.invoke(removeExceptionTag1(exceptionStr));
      vm2.invoke(removeExceptionTag1(exceptionStr));
      vm3.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
      size = pr.size();
      assertEquals("Size doesnt return expected value", 10, size);
      LogWriterUtils.getLogWriter().fine("All the puts done successfully for vm0.");
      {
        PartitionedRegion ppr = (PartitionedRegion) pr;
        try {
          ppr.dumpAllBuckets(true);
        } catch (ReplyException re) {
          fail("dumpAllBuckets failed with ReplyException: " + re);
        }
      }
    }
  });
  /*
   * Do put(), create(), invalidate() operations through VM with PR having only Accessor(no data
   * store)
   */
  vm1.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail("PR not created");
      }
      for (int i = putRange_2Start; i <= putRange_2End; i++) {
        pr.put(Integer.toString(i), Integer.toString(i));
      }
      // create() entries; odd keys get a null value (invalidate-style entries).
      for (int i = createRange_2Start; i <= createRange_2End; i++) {
        Object val = null;
        Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        pr.create(key, val);
      }
      final String entryExistsException = EntryExistsException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + entryExistsException + "</ExpectedException>");
      // NOTE: was previously joined with ":" — every other exception tag in this test
      // uses "||" as the separator, so ":" would fail to match the second class name.
      String exceptionStr = ReplyException.class.getName() + "||" + entryExistsException;
      vm0.invoke(addExceptionTag1(exceptionStr));
      vm2.invoke(addExceptionTag1(exceptionStr));
      vm3.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = createRange_2Start; i <= createRange_2End; i++) {
        Object val = null;
        Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        try {
          pr.create(key, val);
          fail("EntryExistsException is not thrown");
        } catch (EntryExistsException expected) {
        }
      }
      vm0.invoke(removeExceptionTag1(exceptionStr));
      vm2.invoke(removeExceptionTag1(exceptionStr));
      vm3.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      getCache().getLogger().info("<ExpectedException action=remove>" + entryExistsException + "</ExpectedException>");
      cache.getLogger().fine("All the puts done successfully for vm1.");
    }
  });
  /*
   * Do destroy() operations through VM with PR having only Accessor(no data store). It also
   * verifies that EntryNotFoundException is thrown if the entry is already destroyed.
   */
  vm1.invoke(new CacheSerializableRunnable("doRemoveOperations1") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      if (pr == null) {
        fail("PR not created");
      }
      for (int i = removeRange_1Start; i <= removeRange_1End; i++) {
        final String key = Integer.toString(i);
        try {
          pr.destroy(key);
        } catch (EntryNotFoundException enfe) {
          searchForKey((PartitionedRegion) pr, key);
          throw enfe;
        }
      }
      // A second destroy of the same keys must raise EntryNotFoundException.
      final String entryNotFoundException = EntryNotFoundException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + entryNotFoundException + "</ExpectedException>");
      String exceptionStr = ReplyException.class.getName() + "||" + entryNotFoundException;
      vm0.invoke(addExceptionTag1(exceptionStr));
      vm2.invoke(addExceptionTag1(exceptionStr));
      vm3.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = removeRange_1Start; i <= removeRange_1End; i++) {
        final String key = Integer.toString(i);
        try {
          pr.destroy(key);
          fail("EntryNotFoundException is not thrown in destroy operation for key = " + i);
        } catch (EntryNotFoundException expected) {
        }
      }
      vm0.invoke(removeExceptionTag1(exceptionStr));
      vm2.invoke(removeExceptionTag1(exceptionStr));
      vm3.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      getCache().getLogger().info("<ExpectedException action=remove>" + entryNotFoundException + "</ExpectedException>");
      LogWriterUtils.getLogWriter().fine("All the remove done successfully for vm0.");
    }
  });
  /*
   * Do more put(), create(), invalidate() operations through VM with PR having Accessor + data
   * store
   */
  vm2.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations3") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      assertNotNull("PR not created", pr);
      for (int i = putRange_3Start; i <= putRange_3End; i++) {
        pr.put(Integer.toString(i), Integer.toString(i));
      }
      for (int i = createRange_3Start; i <= createRange_3End; i++) {
        Object val = null;
        Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        pr.create(key, val);
      }
      final String entryExistsException = EntryExistsException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + entryExistsException + "</ExpectedException>");
      String exceptionStr = ReplyException.class.getName() + "||" + entryExistsException;
      vm0.invoke(addExceptionTag1(exceptionStr));
      vm1.invoke(addExceptionTag1(exceptionStr));
      vm3.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = createRange_3Start; i <= createRange_3End; i++) {
        Object val = null;
        Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        try {
          pr.create(key, val);
          fail("EntryExistsException is not thrown");
        } catch (EntryExistsException expected) {
        }
      }
      vm0.invoke(removeExceptionTag1(exceptionStr));
      vm1.invoke(removeExceptionTag1(exceptionStr));
      vm3.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      getCache().getLogger().info("<ExpectedException action=remove>" + entryExistsException + "</ExpectedException>");
    }
  });
  /*
   * Do more remove() operations through VM with PR having Accessor + data store
   */
  vm2.invoke(new CacheSerializableRunnable("doRemoveOperations2") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      // assertNotNull already fails on null; the old follow-up "if (pr == null) fail(...)"
      // was unreachable and has been removed.
      assertNotNull("PR not created", pr);
      for (int i = removeRange_2Start; i <= removeRange_2End; i++) {
        String key = Integer.toString(i);
        try {
          pr.destroy(key);
        } catch (EntryNotFoundException enfe) {
          searchForKey((PartitionedRegion) pr, key);
          throw enfe;
        }
      }
      final String entryNotFound = EntryNotFoundException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + entryNotFound + "</ExpectedException>");
      String exceptionStr = ReplyException.class.getName() + "||" + entryNotFound;
      vm0.invoke(addExceptionTag1(exceptionStr));
      vm1.invoke(addExceptionTag1(exceptionStr));
      vm3.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = removeRange_2Start; i <= removeRange_2End; i++) {
        try {
          pr.destroy(Integer.toString(i));
          fail("EntryNotFoundException is not thrown in destroy operation for key = " + (Integer.toString(i)));
        } catch (EntryNotFoundException expected) {
        }
      }
      vm0.invoke(removeExceptionTag1(exceptionStr));
      vm1.invoke(removeExceptionTag1(exceptionStr));
      vm3.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      getCache().getLogger().info("<ExpectedException action=remove>" + entryNotFound + "</ExpectedException>");
    }
  });
  /*
   * Do more put() operations through VM with PR having only Accessor
   */
  vm3.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations4") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      assertNotNull("PR not created", pr);
      for (int i = putRange_4Start; i <= putRange_4End; i++) {
        pr.put(Integer.toString(i), Integer.toString(i));
      }
      for (int i = createRange_4Start; i <= createRange_4End; i++) {
        Object val = null;
        final Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        pr.create(key, val);
      }
      final String entryExistsException = EntryExistsException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + entryExistsException + "</ExpectedException>");
      String exceptionStr = ReplyException.class.getName() + "||" + entryExistsException;
      vm0.invoke(addExceptionTag1(exceptionStr));
      vm1.invoke(addExceptionTag1(exceptionStr));
      vm2.invoke(addExceptionTag1(exceptionStr));
      addExceptionTag1(exceptionStr);
      for (int i = createRange_4Start; i <= createRange_4End; i++) {
        Object val = null;
        final Object key = Integer.toString(i);
        if (i % 2 == 0) {
          val = Integer.toString(i);
        }
        try {
          pr.create(key, val);
          fail("EntryExistsException is not thrown");
        } catch (EntryExistsException expected) {
        }
      }
      vm0.invoke(removeExceptionTag1(exceptionStr));
      vm1.invoke(removeExceptionTag1(exceptionStr));
      vm2.invoke(removeExceptionTag1(exceptionStr));
      removeExceptionTag1(exceptionStr);
      getCache().getLogger().info("<ExpectedException action=remove>" + entryExistsException + "</ExpectedException>");
    }
  });
  /*
   * validate the data in PartionedRegion at different VM's
   */
  CacheSerializableRunnable validateRegionAPIs = new CacheSerializableRunnable("validateInserts") {
    public void run2() {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      assertNotNull("PR not created", pr);
      // Validation with get(): removed ranges must be absent, everything else present.
      for (int i = putRange_1Start; i <= putRange_4End; i++) {
        Object val = pr.get(Integer.toString(i));
        if ((i >= removeRange_1Start && i <= removeRange_1End) || (i >= removeRange_2Start && i <= removeRange_2End)) {
          assertNull("Remove validation failed for key " + i, val);
        } else {
          assertNotNull("put() not done for key " + i, val);
        }
      }
      // Validation with containsKey().
      for (int i = putRange_1Start; i <= putRange_4End; i++) {
        boolean conKey = pr.containsKey(Integer.toString(i));
        if ((i >= removeRange_1Start && i <= removeRange_1End) || (i >= removeRange_2Start && i <= removeRange_2End)) {
          assertFalse("containsKey() remove validation failed for key = " + i, conKey);
        } else {
          assertTrue("containsKey() Validation failed for key = " + i, conKey);
        }
        LogWriterUtils.getLogWriter().fine("containsKey() Validated entry for key = " + i);
      }
      // Validation with containsValueForKey().
      for (int i = putRange_1Start; i <= putRange_4End; i++) {
        boolean conKey = pr.containsValueForKey(Integer.toString(i));
        if ((i >= removeRange_1Start && i <= removeRange_1End) || (i >= removeRange_2Start && i <= removeRange_2End)) {
          assertFalse("containsValueForKey() remove validation failed for key = " + i, conKey);
        } else {
          assertTrue("containsValueForKey() Validation failed for key = " + i, conKey);
        }
        LogWriterUtils.getLogWriter().fine("containsValueForKey() Validated entry for key = " + i);
      }
    }
  };
  // validate the data from all the VM's
  vm0.invoke(validateRegionAPIs);
  vm1.invoke(validateRegionAPIs);
  vm2.invoke(validateRegionAPIs);
  vm3.invoke(validateRegionAPIs);
  /*
   * destroy the Region.
   */
  vm0.invoke(new CacheSerializableRunnable("destroyRegionOp") {
    public void run2() {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      assertNotNull("Region already destroyed.", pr);
      pr.destroyRegion();
      assertTrue("Region isDestroyed false", pr.isDestroyed());
      assertNull("Region not destroyed.", cache.getRegion(prName));
    }
  });
  /*
   * validate the data after the region.destroy() operation.
   */
  CacheSerializableRunnable validateAfterRegionDestroy = new CacheSerializableRunnable("validateInsertsAfterRegionDestroy") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region pr = cache.getRegion(prName);
      assertNull("Region not destroyed.", pr);
      Region rootRegion = cache.getRegion(Region.SEPARATOR + PartitionedRegionHelper.PR_ROOT_REGION_NAME);
      // The PR config entry in the PR root region must be gone. substring(1) strips the
      // leading Region.SEPARATOR from the full region path.
      Object configObj = rootRegion.get(prName.substring(1));
      if (configObj != null) {
        fail("PRConfig found in allPartitionedRegion Metadata for this PR.");
      }
      // No bucket regions for this PR may remain under the root region.
      Set subreg = rootRegion.subregions(false);
      for (java.util.Iterator itr = subreg.iterator(); itr.hasNext(); ) {
        Region reg = (Region) itr.next();
        String name = reg.getName();
        if ((name.indexOf(PartitionedRegionHelper.BUCKET_REGION_PREFIX)) != -1) {
          fail("Bucket exists. Bucket = " + name);
        }
      }
      // Verify the prIdToPR map no longer references the PR.
      // NOTE(review): the key "PR1" is hard-coded; presumably it matches the region name
      // used by the callers of this method — confirm against the calling tests.
      if (PartitionedRegion.prIdToPR.containsKey("PR1")) {
        fail("prIdToPR contains pr reference ");
      }
    }
  };
  // validateAfterRegionDestory from all VM's
  vm0.invoke(validateAfterRegionDestroy);
  vm1.invoke(validateAfterRegionDestroy);
  vm2.invoke(validateAfterRegionDestroy);
  vm3.invoke(validateAfterRegionDestroy);
}
Use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache:
the class RoutingObject, method tryVerifyPrimaryColocation.
/**
 * Verifies that the customer, order and shipment partitioned regions agree on primary
 * bucket ownership: for every bucket this member is primary for, all three regions must
 * report the same primary member. On a mismatch the reason is recorded in {@code excuse}
 * and false is returned.
 *
 * @return true if primary bucket ownership is identical across the three colocated regions
 */
protected static boolean tryVerifyPrimaryColocation() {
  // The three bucket scans were previously copy-pasted; collect them via one helper.
  HashMap customerPrimaryMap = collectPrimaryBuckets((PartitionedRegion) customerPR);
  HashMap orderPrimaryMap = collectPrimaryBuckets((PartitionedRegion) orderPR);
  HashMap shipmentPrimaryMap = collectPrimaryBuckets((PartitionedRegion) shipmentPR);
  // verification for primary
  int s1 = customerPrimaryMap.size();
  int s2 = orderPrimaryMap.size();
  if (s1 != s2) {
    excuse = "customerPrimaryMap size (" + s1 + ") != orderPrimaryMap size (" + s2 + ")";
    return false;
  }
  if (!customerPrimaryMap.entrySet().equals(orderPrimaryMap.entrySet())) {
    excuse = "customerPrimaryMap entrySet != orderPrimaryMap entrySet";
    return false;
  }
  if (!customerPrimaryMap.entrySet().equals(shipmentPrimaryMap.entrySet())) {
    excuse = "customerPrimaryMap entrySet != shipmentPrimaryMap entrySet";
    return false;
  }
  // NOTE: Map.equals is defined by entrySet equality, so for HashMaps these two checks
  // can only fail if the entrySet checks above failed first. Kept to preserve the
  // original excuse messages.
  if (!customerPrimaryMap.equals(orderPrimaryMap)) {
    excuse = "customerPrimaryMap != orderPrimaryMap";
    return false;
  }
  if (!customerPrimaryMap.equals(shipmentPrimaryMap)) {
    excuse = "customerPrimaryMap != shipmentPrimaryMap";
    return false;
  }
  return true;
}

/**
 * Scans the region's bucket set and returns a map of bucketId -> primary member id,
 * containing only the buckets this member is primary for.
 */
private static HashMap collectPrimaryBuckets(PartitionedRegion region) {
  HashMap primaryMap = new HashMap();
  RegionAdvisor advisor = region.getRegionAdvisor();
  Iterator buckets = advisor.getBucketSet().iterator();
  while (buckets.hasNext()) {
    Integer bucketId = (Integer) buckets.next();
    if (advisor.isPrimaryForBucket(bucketId.intValue())) {
      primaryMap.put(bucketId, advisor.getPrimaryMemberForBucket(bucketId.intValue()).getId());
    }
  }
  return primaryMap;
}
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class RoutingObject method dump.
/**
 * Dumps diagnostic state for the three colocated partitioned regions: first the bucket
 * contents of each region, then the bucket-to-node (B2N) mapping for buckets 0..5.
 * Order is preserved from the original: all bucket dumps first, then all B2N dumps.
 */
protected static void dump() {
  // The previous revision obtained an InternalLogWriter here but never used it.
  final PartitionedRegion[] regions = {
      (PartitionedRegion) customerPR, (PartitionedRegion) orderPR, (PartitionedRegion) shipmentPR
  };
  for (PartitionedRegion region : regions) {
    region.dumpAllBuckets(false);
  }
  // NOTE(review): 6 presumably matches the configured total bucket count of these
  // regions — confirm against the region creation code.
  for (PartitionedRegion region : regions) {
    for (int bucketId = 0; bucketId < 6; bucketId++) {
      region.dumpB2NForBucket(bucketId);
    }
  }
}
use of org.apache.geode.internal.cache.PartitionedRegion in project geode by apache.
the class RoutingObject method tryVerifyColocation.
/**
 * Verifies full bucket colocation across the customer, order and shipment partitioned
 * regions: every bucket must be hosted by the same set of members and have the same
 * primary member in all three regions. On the first mismatch, a human-readable reason is
 * recorded in the static {@code excuse} field and false is returned.
 *
 * @return true if verified
 */
protected static boolean tryVerifyColocation() {
// For the customer region: bucketId -> Set of hosting members, and
// bucketId -> primary member id (only buckets this member is primary for).
HashMap customerMap = new HashMap();
HashMap customerPrimaryMap = new HashMap();
RegionAdvisor customeAdvisor = ((PartitionedRegion) customerPR).getRegionAdvisor();
Iterator customerIterator = customeAdvisor.getBucketSet().iterator();
while (customerIterator.hasNext()) {
Integer bucketId = (Integer) customerIterator.next();
Set someOwners = customeAdvisor.getBucketOwners(bucketId.intValue());
customerMap.put(bucketId, someOwners);
if (customeAdvisor.isPrimaryForBucket(bucketId.intValue())) {
customerPrimaryMap.put(bucketId, customeAdvisor.getPrimaryMemberForBucket(bucketId.intValue()).getId());
}
}
// Same collection for the order region.
HashMap orderMap = new HashMap();
HashMap orderPrimaryMap = new HashMap();
RegionAdvisor orderAdvisor = ((PartitionedRegion) orderPR).getRegionAdvisor();
Iterator orderIterator = orderAdvisor.getBucketSet().iterator();
while (orderIterator.hasNext()) {
Integer bucketId = (Integer) orderIterator.next();
Set someOwners = orderAdvisor.getBucketOwners(bucketId.intValue());
orderMap.put(bucketId, someOwners);
if (orderAdvisor.isPrimaryForBucket(bucketId.intValue())) {
orderPrimaryMap.put(bucketId, orderAdvisor.getPrimaryMemberForBucket(bucketId.intValue()).getId());
}
}
// Shipment region: collection is interleaved with per-bucket owner comparisons against
// the customer and order maps, so a mismatch fails fast inside the loop.
HashMap shipmentMap = new HashMap();
HashMap shipmentPrimaryMap = new HashMap();
RegionAdvisor shipmentAdvisor = ((PartitionedRegion) shipmentPR).getRegionAdvisor();
Iterator shipmentIterator = shipmentAdvisor.getBucketSet().iterator();
while (shipmentIterator.hasNext()) {
Integer bucketId = (Integer) shipmentIterator.next();
Set someOwners = shipmentAdvisor.getBucketOwners(bucketId.intValue());
shipmentMap.put(bucketId, someOwners);
// NOTE(review): if the shipment advisor knows a bucket the customer/order advisors do
// not, customerMap.get(bucketId) is null and this dereference throws NPE rather than
// setting an excuse — presumably the bucket sets always match; confirm.
if (!customerMap.get(bucketId).equals(someOwners)) {
excuse = "customerMap at " + bucketId + " has wrong owners";
return false;
}
if (!orderMap.get(bucketId).equals(someOwners)) {
excuse = "orderMap at " + bucketId + " has wrong owners";
return false;
}
if (shipmentAdvisor.isPrimaryForBucket(bucketId.intValue())) {
shipmentPrimaryMap.put(bucketId, shipmentAdvisor.getPrimaryMemberForBucket(bucketId.intValue()).getId());
}
}
// verification for primary
if (customerPrimaryMap.size() != orderPrimaryMap.size()) {
excuse = "customerPrimaryMap and orderPrimaryMap have different sizes";
return false;
}
if (customerPrimaryMap.size() != shipmentPrimaryMap.size()) {
excuse = "customerPrimaryMap and shipmentPrimaryMap have different sizes";
return false;
}
if (!customerPrimaryMap.entrySet().equals(orderPrimaryMap.entrySet())) {
excuse = "customerPrimaryMap and orderPrimaryMap have different entrySets";
return false;
}
if (!customerPrimaryMap.entrySet().equals(shipmentPrimaryMap.entrySet())) {
excuse = "customerPrimaryMap and shipmentPrimaryMap have different entrySets";
return false;
}
// NOTE(review): Map.equals is specified as entrySet equality, so for HashMaps the two
// checks below are redundant with the entrySet checks above; they differ only in which
// excuse message would be recorded.
if (!customerPrimaryMap.equals(orderPrimaryMap)) {
excuse = "customerPrimaryMap and orderPrimaryMap not equal";
return false;
}
if (!customerPrimaryMap.equals(shipmentPrimaryMap)) {
excuse = "customerPrimaryMap and shipmentPrimaryMap not equal";
return false;
}
// verification for all (owner sets per bucket)
if (customerMap.size() != orderMap.size()) {
excuse = "customerMap and orderMap have different sizes";
return false;
}
if (customerMap.size() != shipmentMap.size()) {
excuse = "customerMap and shipmentMap have different sizes";
return false;
}
if (!customerMap.entrySet().equals(orderMap.entrySet())) {
excuse = "customerMap and orderMap have different entrySets";
return false;
}
if (!customerMap.entrySet().equals(shipmentMap.entrySet())) {
excuse = "customerMap and shipmentMap have different entrySets";
return false;
}
if (!customerMap.equals(orderMap)) {
excuse = "customerMap and orderMap not equal";
return false;
}
if (!customerMap.equals(shipmentMap)) {
excuse = "customerMap and shipmentMap not equal";
return false;
}
return true;
}
Aggregations