use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
the class ResourceManagerDUnitTest method testRemoveBucketMessage.
@Test
public void testRemoveBucketMessage() {
  final String[] regionPath = new String[] { getUniqueName() + "-PR-0" };
  final int[] numBuckets = new int[] { 1 };
  final int[] redundantCopies = new int[] { 1 };
  // localMaxMemory config to use for the 2 members
  final int[] localMaxMemory = new int[] { 100, 100 };
  createRegion(Host.getHost(0).getVM(0), regionPath[0], localMaxMemory[0], numBuckets[0],
      redundantCopies[0]);
  createRegion(Host.getHost(0).getVM(1), regionPath[0], localMaxMemory[1], numBuckets[0],
      redundantCopies[0]);
  final Integer bucketKey = Integer.valueOf(0);
  // a small 1-byte value; the entry's size is irrelevant to this test
  final byte[] value = new byte[1];
  createBucket(0, regionPath[0], bucketKey, value);
  // identify the members and their config values
  final InternalDistributedMember[] members = new InternalDistributedMember[2];
  final long[] memberSizes = new long[members.length];
  final int[] memberBucketCounts = new int[members.length];
  final int[] memberPrimaryCounts = new int[members.length];
  fillValidationArrays(members, memberSizes, memberBucketCounts, memberPrimaryCounts,
      regionPath[0]);
  int primaryVM = -1;
  int otherVM = -1;
  for (int i = 0; i < memberPrimaryCounts.length; i++) {
    if (memberPrimaryCounts[i] == 0) {
      otherVM = i;
    } else if (memberPrimaryCounts[i] == 1) {
      // found the primary
      primaryVM = i;
    }
  }
  assertTrue(primaryVM > -1);
  assertTrue(otherVM > -1);
  assertTrue(primaryVM != otherVM);
  final int finalOtherVM = otherVM;
  Host.getHost(0).getVM(otherVM).invoke(new SerializableRunnable() {
    public void run() {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
      Bucket bucket = pr.getRegionAdvisor().getBucket(0);
      // verify the bucket is non-null before dereferencing it
      assertNotNull("Bucket is null on target member", bucket);
      assertTrue("Target member is not hosting bucket to remove", bucket.isHosting());
      assertNotNull("BucketRegion is null on target member",
          bucket.getBucketAdvisor().getProxyBucketRegion().getHostedBucketRegion());
    }
  });
  boolean sentRemoveBucket =
      ((Boolean) Host.getHost(0).getVM(primaryVM).invoke(new SerializableCallable() {
        public Object call() {
          InternalDistributedMember recipient = members[finalOtherVM];
          PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
          RemoveBucketResponse response = RemoveBucketMessage.send(recipient, pr, 0, false);
          if (response != null) {
            response.waitForRepliesUninterruptibly();
            return Boolean.TRUE;
          } else {
            return Boolean.FALSE;
          }
        }
      })).booleanValue();
  assertTrue("Failed to get reply to RemoveBucketMessage", sentRemoveBucket);
  Host.getHost(0).getVM(otherVM).invoke(new SerializableRunnable() {
    public void run() {
      PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath[0]);
      Bucket bucket = pr.getRegionAdvisor().getBucket(0);
      BucketRegion bucketRegion =
          bucket.getBucketAdvisor().getProxyBucketRegion().getHostedBucketRegion();
      assertFalse("Target member is still hosting removed bucket", bucket.isHosting());
      assertNull("BucketRegion is not null on target member", bucketRegion);
    }
  });
}
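For quick reference, the hosting check this test exercises reduces to a few calls on the region advisor. A minimal sketch, where the cache handle and region path are hypothetical stand-ins for this illustration:

// Sketch: resolve the local BucketRegion for bucket 0, if this member hosts it.
// "cache" and "/myPR" are hypothetical placeholders, not names from the test.
PartitionedRegion pr = (PartitionedRegion) cache.getRegion("/myPR");
Bucket bucket = pr.getRegionAdvisor().getBucket(0);
boolean hosted = bucket.isHosting(); // becomes false once the bucket copy is removed
BucketRegion br =
    bucket.getBucketAdvisor().getProxyBucketRegion().getHostedBucketRegion(); // null when not hosted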
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
the class PRBucketSynchronizationDUnitTest method verifySynchronized.
private void verifySynchronized(VM vm, final InternalDistributedMember crashedMember) {
  vm.invoke(new SerializableCallable("check that synchronization happened") {
    public Object call() throws Exception {
      PartitionedRegion pr = (PartitionedRegion) TestRegion;
      final BucketRegion bucket = pr.getDataStore().getLocalBucketById(0);
      Wait.waitForCriterion(new WaitCriterion() {
        String waitingFor = "primary is still in membership view: " + crashedMember;
        boolean dumped = false;

        public boolean done() {
          if (TestRegion.getCache().getDistributionManager().isCurrentMember(crashedMember)) {
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          if (!TestRegion.containsKey("Object3")) {
            waitingFor = "entry for Object3 not found";
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          RegionEntry re = bucket.getRegionMap().getEntry("Object5");
          if (re == null) {
            if (!dumped) {
              dumped = true;
              bucket.dumpBackingMap();
            }
            waitingFor = "entry for Object5 not found";
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          if (!re.isTombstone()) {
            if (!dumped) {
              dumped = true;
              bucket.dumpBackingMap();
            }
            waitingFor = "Object5 is not a tombstone but should be: " + re;
            LogWriterUtils.getLogWriter().info(waitingFor);
            return false;
          }
          return true;
        }

        public String description() {
          return waitingFor;
        }
      }, 30000, 5000, true);
      return null;
    }
  });
}
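The polling idiom above is generic DUnit machinery rather than anything specific to bucket synchronization. A stripped-down sketch of the same pattern, with a hypothetical condition:

// Sketch of the DUnit WaitCriterion idiom used above: poll done() until it
// returns true or 30 seconds elapse (re-checking every 5 seconds), and fail
// the test with description() on timeout. The condition body is hypothetical.
Wait.waitForCriterion(new WaitCriterion() {
  public boolean done() {
    return bucket.getRegionMap().getEntry("Object5") != null;
  }

  public String description() {
    return "waiting for an entry for Object5 in the bucket's region map";
  }
}, 30000, 5000, true);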
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
the class RebalanceOperationDUnitTest method moveBucketsWithUnrecoveredValuesRedundancy.
/**
 * Test that buckets whose values have not yet been recovered from disk can still be moved
 * correctly by a rebalance.
 *
 * @throws CancellationException
 */
public void moveBucketsWithUnrecoveredValuesRedundancy(final boolean simulate) {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
      try {
        Cache cache = getCache();
        if (cache.findDiskStore("store") == null) {
          cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).setMaxOplogSize(1)
              .create("store");
        }
        AttributesFactory attr = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        attr.setDiskStoreName("store");
        attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(-1);
        paf.setStartupRecoveryDelay(-1);
        PartitionAttributes prAttr = paf.create();
        attr.setPartitionAttributes(prAttr);
        attr.setCacheLoader(new Bug40228Loader());
        cache.createRegion("region1", attr.create());
      } finally {
        System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
      }
    }
  };
  // Create the region in only 1 VM
  vm0.invoke(createPrRegion);
  // Create some buckets
  vm0.invoke(new SerializableRunnable("createSomeBuckets") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.put(Integer.valueOf(1), "A");
      region.put(Integer.valueOf(2), "A");
      region.put(Integer.valueOf(3), "A");
      region.put(Integer.valueOf(4), "A");
      region.put(Integer.valueOf(5), "A");
      region.put(Integer.valueOf(6), "A");
    }
  });
  final long[] bucketSizes =
      (long[]) vm0.invoke(new SerializableCallable("get sizes and close cache") {
        public Object call() {
          PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
          PartitionedRegionDataStore dataStore = region.getDataStore();
          long[] bucketSizes = new long[7];
          for (int i = 1; i <= 6; i++) {
            BucketRegion bucket = dataStore.getLocalBucketById(i);
            bucketSizes[i] = bucket.getTotalBytes();
            assertEquals(0, bucket.getNumOverflowBytesOnDisk());
            assertEquals(0, bucket.getNumOverflowOnDisk());
            assertEquals(1, bucket.getNumEntriesInVM());
          }
          getCache().close();
          return bucketSizes;
        }
      });
  // now recover the region
  vm0.invoke(createPrRegion);
  vm0.invoke(new SerializableRunnable("check sizes") {
    public void run() {
      PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
      PartitionedRegionDataStore dataStore = region.getDataStore();
      for (int i = 1; i <= 6; i++) {
        BucketRegion bucket = dataStore.getLocalBucketById(i);
        assertEquals(1, bucket.getNumOverflowOnDisk());
        assertEquals(0, bucket.getNumEntriesInVM());
        // the size recorded on disk is not the same as the in-memory size, apparently
        assertTrue("Bucket size was " + bucket.getNumOverflowBytesOnDisk(),
            1 < bucket.getNumOverflowBytesOnDisk());
        assertEquals(bucket.getNumOverflowBytesOnDisk(), bucket.getTotalBytes());
      }
    }
  });
  // Create the region in the other VM (should have no effect)
  vm1.invoke(createPrRegion);
  // Now simulate a rebalance
  vm0.invoke(new SerializableRunnable("simulateRebalance") {
    public void run() {
      Cache cache = getCache();
      ResourceManager manager = cache.getResourceManager();
      RebalanceResults results = doRebalance(simulate, manager);
      assertEquals(0, results.getTotalBucketCreatesCompleted());
      assertEquals(0, results.getTotalPrimaryTransfersCompleted());
      assertEquals(3, results.getTotalBucketTransfersCompleted());
      assertTrue("Transferred Bytes = " + results.getTotalBucketTransferBytes(),
          0 < results.getTotalBucketTransferBytes());
      Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
      assertEquals(1, detailSet.size());
      PartitionRebalanceInfo details = detailSet.iterator().next();
      assertEquals(0, details.getBucketCreatesCompleted());
      assertEquals(0, details.getPrimaryTransfersCompleted());
      assertTrue(0 < details.getBucketTransferBytes());
      assertEquals(3, details.getBucketTransfersCompleted());
      Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
      assertEquals(2, afterDetails.size());
      for (PartitionMemberInfo memberDetails : afterDetails) {
        assertEquals(3, memberDetails.getBucketCount());
        assertEquals(3, memberDetails.getPrimaryCount());
      }
      if (!simulate) {
        verifyStats(manager, results);
      }
    }
  });
  if (!simulate) {
    SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {
      public void run() {
        Cache cache = getCache();
        Region region = cache.getRegion("region1");
        PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
        assertEquals(6, details.getCreatedBucketCount());
        assertEquals(0, details.getActualRedundantCopies());
        assertEquals(0, details.getLowRedundancyBucketCount());
        assertEquals(2, details.getPartitionMemberInfo().size());
        for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
          assertEquals(3, memberDetails.getBucketCount());
          assertEquals(3, memberDetails.getPrimaryCount());
        }
        // check to make sure that moving buckets didn't close the cache loader
        Bug40228Loader loader =
            (Bug40228Loader) cache.getRegion("region1").getAttributes().getCacheLoader();
        assertFalse(loader.isClosed());
      }
    };
    vm0.invoke(checkRedundancyFixed);
    vm1.invoke(checkRedundancyFixed);
    SerializableRunnable checkBug40228Fixed = new SerializableRunnable("checkBug40228Fixed") {
      public void run() {
        Cache cache = getCache();
        Bug40228Loader loader =
            (Bug40228Loader) cache.getRegion("region1").getAttributes().getCacheLoader();
        assertFalse(loader.isClosed());
        // check to make sure that closing the PR closes the cache loader
        cache.getRegion("region1").close();
        assertTrue(loader.isClosed());
      }
    };
    vm0.invoke(checkBug40228Fixed);
    vm1.invoke(checkBug40228Fixed);
  }
}
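The verification step amounts to reading a RebalanceResults object. A condensed sketch using the same accessors the assertions above rely on (doRebalance is the test class's own helper, and the cache handle is assumed to be in scope):

// Sketch: run (or simulate) a rebalance and report what moved, using the
// RebalanceResults accessors exercised by the test.
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager); // helper defined in the test class
for (PartitionRebalanceInfo details : results.getPartitionRebalanceDetails()) {
  System.out.println(details.getBucketTransfersCompleted() + " bucket transfers, "
      + details.getBucketTransferBytes() + " bytes moved");
}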
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
the class CompiledRegion method evaluate.
public Object evaluate(ExecutionContext context) throws RegionNotFoundException {
  Region rgn;
  Cache cache = context.getCache();
  // do PR bucketRegion substitution here for expressions that evaluate to a Region.
  PartitionedRegion pr = context.getPartitionedRegion();
  if (pr != null && pr.getFullPath().equals(this.regionPath)) {
    rgn = context.getBucketRegion();
  } else if (pr != null) {
    // This is a tricky solution that allows equijoin queries on PartitionedRegions to
    // execute locally. We may be in an equijoin, possibly across PRs, so use the
    // context's bucket region to get the bucket ID and then retrieve this region's
    // corresponding bucket region.
    BucketRegion br = context.getBucketRegion();
    int bucketID = br.getId();
    // Is the current region a partitioned region?
    rgn = cache.getRegion(this.regionPath);
    if (rgn.getAttributes().getDataPolicy().withPartitioning()) {
      // convert it into a bucket region.
      PartitionedRegion prLocal = (PartitionedRegion) rgn;
      rgn = prLocal.getDataStore().getLocalBucketById(bucketID);
    }
  } else {
    rgn = cache.getRegion(this.regionPath);
  }
  if (rgn == null) {
    // if the region could not be found because the cache is closed, throw
    // a CacheClosedException
    if (cache.isClosed()) {
      throw new CacheClosedException();
    }
    throw new RegionNotFoundException(
        LocalizedStrings.CompiledRegion_REGION_NOT_FOUND_0.toLocalizedString(this.regionPath));
  }
  if (context.isCqQueryContext()) {
    return new QRegion(rgn, true, context);
  } else {
    return new QRegion(rgn, false, context);
  }
}
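The equijoin branch above boils down to mapping the bucket id already in the query context onto the other region's local bucket. A condensed sketch, where the region path is a hypothetical placeholder:

// Sketch: substitute a PR's local bucket for the region itself, keyed by the
// bucket id from the query context. "/otherPR" is a hypothetical path.
int bucketID = context.getBucketRegion().getId();
Region rgn = cache.getRegion("/otherPR");
if (rgn.getAttributes().getDataPolicy().withPartitioning()) {
  rgn = ((PartitionedRegion) rgn).getDataStore().getLocalBucketById(bucketID);
}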
use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.
the class CompactMapRangeIndex method doIndexAddition.
protected void doIndexAddition(Object mapKey, Object indexKey, Object value, RegionEntry entry)
    throws IMQException {
  if (indexKey == null) {
    indexKey = IndexManager.NULL;
  }
  if (mapKey == null) {
    mapKey = IndexManager.NULL;
  }
  boolean isPr = this.region instanceof BucketRegion;
  // Get the RangeIndex for this map key, or create it if absent
  CompactRangeIndex rg = (CompactRangeIndex) this.mapKeyToValueIndex.get(mapKey);
  if (rg == null) {
    // use previously created MapRangeIndexStatistics
    IndexStatistics stats = this.internalIndexStats;
    PartitionedIndex prIndex = null;
    if (isPr) {
      prIndex = (PartitionedIndex) this.getPRIndex();
      prIndex.incNumMapKeysStats(mapKey);
    }
    rg = new CompactRangeIndex(indexName + "-" + mapKey, region, fromClause, indexedExpression,
        projectionAttributes, this.originalFromClause, this.originalIndexedExpression,
        this.canonicalizedDefinitions, stats);
    rg.instantiateEvaluator(this.ich,
        ((AbstractIndex.IMQEvaluator) this.evaluator).getIndexResultSetType());
    this.mapKeyToValueIndex.put(mapKey, rg);
    if (!isPr) {
      this.internalIndexStats.incNumMapIndexKeys(1);
    }
  }
  long start = System.nanoTime();
  rg.addMapping(indexKey, value, entry);
  // This call is skipped when addMapping is called from MapRangeIndex
  // rg.internalIndexStats.incNumUpdates();
  this.internalIndexStats.incUpdatesInProgress(-1);
  long end = System.nanoTime() - start;
  this.internalIndexStats.incUpdateTime(end);
  this.internalIndexStats.incNumUpdates();
  // add to the mapKey -> indexKey map for this entry
  Map mapKeyToIndexKey = this.entryToMapKeyIndexKeyMap.get(entry);
  if (mapKeyToIndexKey == null) {
    mapKeyToIndexKey = new HashMap();
    entryToMapKeyIndexKeyMap.put(entry, mapKeyToIndexKey);
  }
  mapKeyToIndexKey.put(mapKey, indexKey);
}
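For context, this path is exercised when a query index is defined over a map expression: each distinct map key lazily gets its own CompactRangeIndex, and additions are delegated to it. A hedged user-side sketch, assuming a hypothetical region named /portfolios with a Map-valued field named positions:

// Sketch (hypothetical region and field names): defining an index over all
// keys of a Map field creates a map range index, whose per-map-key additions
// flow through doIndexAddition above.
QueryService qs = cache.getQueryService();
qs.createIndex("positionsIndex", "p.positions[*]", "/portfolios p");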