use of org.apache.geode.internal.cache.PartitionedRegionDataStore in project geode by apache.
the class ResourceManagerDUnitTest method fillValidationArrays.
private void fillValidationArrays(final InternalDistributedMember[] members, final long[] memberSizes,
    final int[] memberBucketCounts, final int[] memberPrimaryCounts, final String regionPath) {
  for (int i = 0; i < members.length; i++) {
    final int vm = i;
    members[vm] = (InternalDistributedMember) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        return getSystem().getDistributedMember();
      }
    });
    memberSizes[vm] = ((Long) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath);
        PartitionedRegionDataStore ds = pr.getDataStore();
        if (ds == null) {
          return Long.valueOf(0);
        } else {
          return Long.valueOf(getSize(ds));
        }
      }
    })).longValue();
    memberBucketCounts[vm] = ((Integer) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath);
        PartitionedRegionDataStore ds = pr.getDataStore();
        if (ds == null) {
          return new Integer(0);
        } else {
          return new Integer(ds.getBucketsManaged());
        }
      }
    })).intValue();
    memberPrimaryCounts[vm] = ((Integer) Host.getHost(0).getVM(vm).invoke(new SerializableCallable() {
      public Object call() {
        PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(regionPath);
        PartitionedRegionDataStore ds = pr.getDataStore();
        if (ds == null) {
          return new Integer(0);
        } else {
          return new Integer(ds.getNumberOfPrimaryBucketsManaged());
        }
      }
    })).intValue();
  }
}
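For orientation, a hedged sketch of how a DUnit test might invoke this helper; the array length of 4 and the region path "/pr" are illustrative assumptions rather than values taken from the original test:

// Hypothetical caller (sketch): collect per-member stats for a partitioned region.
int vmCount = 4; // a DUnit Host typically provides 4 VMs
InternalDistributedMember[] members = new InternalDistributedMember[vmCount];
long[] memberSizes = new long[vmCount];
int[] memberBucketCounts = new int[vmCount];
int[] memberPrimaryCounts = new int[vmCount];
fillValidationArrays(members, memberSizes, memberBucketCounts, memberPrimaryCounts, "/pr");
// memberBucketCounts[i] and memberPrimaryCounts[i] can now be asserted against the expected balance.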
use of org.apache.geode.internal.cache.PartitionedRegionDataStore in project geode by apache.
the class RebalanceOperationDUnitTest method moveBucketsWithUnrecoveredValuesRedundancy.
/**
 * Test that buckets whose values have not yet been recovered from disk can still be moved
 * during a rebalance, and that the transferred sizes reflect the on-disk overflow bytes.
 */
public void moveBucketsWithUnrecoveredValuesRedundancy(final boolean simulate) {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "false");
      try {
        Cache cache = getCache();
        if (cache.findDiskStore("store") == null) {
          cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).setMaxOplogSize(1).create("store");
        }
        AttributesFactory attr = new AttributesFactory();
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        attr.setDiskStoreName("store");
        attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        paf.setRedundantCopies(0);
        paf.setRecoveryDelay(-1);
        paf.setStartupRecoveryDelay(-1);
        PartitionAttributes prAttr = paf.create();
        attr.setPartitionAttributes(prAttr);
        attr.setCacheLoader(new Bug40228Loader());
        cache.createRegion("region1", attr.create());
      } finally {
        System.setProperty(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, "true");
      }
    }
  };
  // Create the region in only 1 VM
  vm0.invoke(createPrRegion);
  // Create some buckets
  vm0.invoke(new SerializableRunnable("createSomeBuckets") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.put(Integer.valueOf(1), "A");
      region.put(Integer.valueOf(2), "A");
      region.put(Integer.valueOf(3), "A");
      region.put(Integer.valueOf(4), "A");
      region.put(Integer.valueOf(5), "A");
      region.put(Integer.valueOf(6), "A");
    }
  });
  final long[] bucketSizes = (long[]) vm0.invoke(new SerializableCallable("get sizes and close cache") {
    public Object call() {
      PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
      PartitionedRegionDataStore dataStore = region.getDataStore();
      long[] bucketSizes = new long[7];
      for (int i = 1; i <= 6; i++) {
        BucketRegion bucket = dataStore.getLocalBucketById(i);
        bucketSizes[i] = bucket.getTotalBytes();
        assertEquals(0, bucket.getNumOverflowBytesOnDisk());
        assertEquals(0, bucket.getNumOverflowOnDisk());
        assertEquals(1, bucket.getNumEntriesInVM());
      }
      getCache().close();
      return bucketSizes;
    }
  });
  // now recover the region
  vm0.invoke(createPrRegion);
  vm0.invoke(new SerializableRunnable("check sizes") {
    public void run() {
      PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
      PartitionedRegionDataStore dataStore = region.getDataStore();
      for (int i = 1; i <= 6; i++) {
        BucketRegion bucket = dataStore.getLocalBucketById(i);
        assertEquals(1, bucket.getNumOverflowOnDisk());
        assertEquals(0, bucket.getNumEntriesInVM());
        // the size recorded on disk is not the same as the in-memory size, apparently
assertTrue("Bucket size was " + bucket.getNumOverflowBytesOnDisk(), 1 < bucket.getNumOverflowBytesOnDisk());
assertEquals(bucket.getNumOverflowBytesOnDisk(), bucket.getTotalBytes());
}
}
});
// Create the region in the other VM (should have no effect)
vm1.invoke(createPrRegion);
// Now simulate a rebalance
vm0.invoke(new SerializableRunnable("simulateRebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(0, results.getTotalBucketCreatesCompleted());
assertEquals(0, results.getTotalPrimaryTransfersCompleted());
assertEquals(3, results.getTotalBucketTransfersCompleted());
assertTrue("Transfered Bytes = " + results.getTotalBucketTransferBytes(), 0 < results.getTotalBucketTransferBytes());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(1, detailSet.size());
PartitionRebalanceInfo details = detailSet.iterator().next();
assertEquals(0, details.getBucketCreatesCompleted());
assertEquals(0, details.getPrimaryTransfersCompleted());
assertTrue(0 < details.getBucketTransferBytes());
assertEquals(3, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(2, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
assertEquals(3, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
if (!simulate) {
verifyStats(manager, results);
}
}
});
if (!simulate) {
SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
assertEquals(2, details.getPartitionMemberInfo().size());
for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
assertEquals(3, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
// check to make sure that moving buckets didn't close the cache loader
Bug40228Loader loader = (Bug40228Loader) cache.getRegion("region1").getAttributes().getCacheLoader();
assertFalse(loader.isClosed());
}
};
vm0.invoke(checkRedundancyFixed);
vm1.invoke(checkRedundancyFixed);
SerializableRunnable checkBug40228Fixed = new SerializableRunnable("checkBug40228Fixed") {
public void run() {
Cache cache = getCache();
Bug40228Loader loader = (Bug40228Loader) cache.getRegion("region1").getAttributes().getCacheLoader();
assertFalse(loader.isClosed());
// check to make sure that closing the PR closes the cache loader
cache.getRegion("region1").close();
assertTrue(loader.isClosed());
}
};
vm0.invoke(checkBug40228Fixed);
vm1.invoke(checkBug40228Fixed);
}
}
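The doRebalance(simulate, manager) helper used above is defined elsewhere in this test class and is not shown here; as a hedged sketch, a rebalance or its simulation is normally driven through the public ResourceManager API along these lines (RebalanceFactory, RebalanceOperation, and RebalanceResults are org.apache.geode.cache.control types; exception handling is omitted for brevity):

// Sketch of the rebalance call the helper presumably wraps; getResults() blocks until the
// (simulated) rebalance finishes and may throw CancellationException or InterruptedException.
RebalanceFactory factory = manager.createRebalanceFactory();
RebalanceOperation op = simulate ? factory.simulate() : factory.start();
RebalanceResults results = op.getResults();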
use of org.apache.geode.internal.cache.PartitionedRegionDataStore in project geode by apache.
the class ParallelGatewaySenderQueueJUnitTest method testLocalSize.
@Test
public void testLocalSize() throws Exception {
  ParallelGatewaySenderQueueMetaRegion mockMetaRegion = mock(ParallelGatewaySenderQueueMetaRegion.class);
  PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
  when(mockMetaRegion.getDataStore()).thenReturn(dataStore);
  when(dataStore.getSizeOfLocalPrimaryBuckets()).thenReturn(3);
  when(metaRegionFactory.newMetataRegion(any(), any(), any(), any())).thenReturn(mockMetaRegion);
  when(cache.createVMRegion(any(), any(), any())).thenReturn(mockMetaRegion);
  queue.addShadowPartitionedRegionForUserPR(mockPR("region1"));
  assertEquals(3, queue.localSize());
}
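If one also wanted to pin down that the value really flowed through the mocked data store, a standard Mockito verification could be appended to the test (this line is an illustrative addition, not part of the original test):

// Optional follow-up (sketch): assert that localSize() actually hit the stubbed call.
verify(dataStore, atLeastOnce()).getSizeOfLocalPrimaryBuckets();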
use of org.apache.geode.internal.cache.PartitionedRegionDataStore in project geode by apache.
the class PartitionedIndex method getBucketIndex.
/**
* Returns the index for the bucket.
*/
public static AbstractIndex getBucketIndex(PartitionedRegion pr, String indexName, Integer bId) throws QueryInvocationTargetException {
  try {
    pr.checkReadiness();
  } catch (Exception ex) {
    throw new QueryInvocationTargetException(ex.getMessage());
  }
  PartitionedRegionDataStore prds = pr.getDataStore();
  BucketRegion bukRegion;
  bukRegion = (BucketRegion) prds.getLocalBucketById(bId);
  if (bukRegion == null) {
    throw new BucketMovedException("Bucket not found for the id :" + bId);
  }
  AbstractIndex index = null;
  if (bukRegion.getIndexManager() != null) {
    index = (AbstractIndex) (bukRegion.getIndexManager().getIndex(indexName));
  } else {
    if (pr.getCache().getLogger().fineEnabled()) {
      pr.getCache().getLogger().fine("Index Manager not found for the bucket region " + bukRegion.getFullPath()
          + " unable to fetch the index " + indexName);
    }
    throw new QueryInvocationTargetException("Index Manager not found, " + " unable to fetch the index " + indexName);
  }
  return index;
}
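A hedged usage sketch for this method follows; the index name, bucket id, and the assumption that a PartitionedRegion pr is already in scope are all illustrative, not taken from any specific Geode caller:

// Hypothetical caller (sketch): fetch the bucket-local index and handle the usual failure modes.
try {
  AbstractIndex bucketIndex = PartitionedIndex.getBucketIndex(pr, "exampleIndex", 3);
  // ... evaluate the query against bucketIndex ...
} catch (BucketMovedException e) {
  // the bucket has been rebalanced to another member; retry the query there
} catch (QueryInvocationTargetException e) {
  // the region was not ready, or no IndexManager exists for this bucket
}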
use of org.apache.geode.internal.cache.PartitionedRegionDataStore in project geode by apache.
the class BucketBackupMessage method operateOnPartitionedRegion.
@Override
protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion pr, long startTime) throws CacheException {
  // d-lock.
  if (pr == null || !pr.isInitialized()) {
    return false;
  }
  if (logger.isTraceEnabled(LogMarker.DM)) {
    logger.trace(LogMarker.DM, "BucketBackupMessage operateOnRegion: {}", pr.getFullPath());
  }
  PartitionedRegionDataStore ds = pr.getDataStore();
  if (ds != null) {
    pr.getRedundancyProvider().finishIncompleteBucketCreation(bucketId);
  } else {
    logger.warn(LocalizedMessage.create(
        LocalizedStrings.BucketBackupMessage_BUCKETBACKUPMESSAGE_DATA_STORE_NOT_CONFIGURED_FOR_THIS_MEMBER));
  }
  pr.getPrStats().endPartitionMessagesProcessing(startTime);
  return false;
}