use of org.apache.geode.cache.partition.PartitionRebalanceInfo in project geode by apache.
the class PartitionedRegionRebalanceOp method executeFPA.
/**
* For an FPR we will create buckets and make primaries as specified by FixedPartitionAttributes.
* We only have to create buckets and make primaries for the local node.
*
* @return the details of the rebalance.
*/
public Set<PartitionRebalanceInfo> executeFPA() {
if (logger.isDebugEnabled()) {
logger.debug("Rebalancing buckets for fixed partitioned region {}", this.targetRegion);
}
long start = System.nanoTime();
InternalCache cache = leaderRegion.getCache();
InternalResourceManager resourceManager = InternalResourceManager.getInternalResourceManager(cache);
InternalResourceManager.getResourceObserver().recoveryStarted(targetRegion);
try {
if (!checkAndSetColocatedRegions()) {
return Collections.emptySet();
}
// If I am a datastore for a FixedPartition, I will be hosting buckets, so no
// redundancy check is needed.
// There is also no need to attach a listener; we are only creating buckets for the
// primary and secondary. We are not creating extra buckets for any peer that
// goes down.
PartitionedRegionLoadModel model = null;
Map<PartitionedRegion, InternalPRInfo> detailsMap = fetchDetails(cache);
BucketOperatorWrapper operator = getBucketOperator(detailsMap);
model = buildModel(operator, detailsMap, resourceManager);
for (PartitionRebalanceDetailsImpl details : operator.getDetailSet()) {
details.setPartitionMemberDetailsBefore(model.getPartitionedMemberDetails(details.getRegionPath()));
}
if (logger.isDebugEnabled()) {
logger.debug("Rebalancing FPR {} Model:{}\n", leaderRegion, model);
}
director.initialize(model);
// This will perform all of the required operations.
director.nextStep();
if (logger.isDebugEnabled()) {
logger.debug("Rebalancing FPR {} complete. Model:{}\n", leaderRegion, model);
}
long end = System.nanoTime();
for (PartitionRebalanceDetailsImpl details : operator.getDetailSet()) {
if (!simulate) {
details.setTime(end - start);
}
details.setPartitionMemberDetailsAfter(model.getPartitionedMemberDetails(details.getRegionPath()));
}
return Collections.<PartitionRebalanceInfo>unmodifiableSet(operator.getDetailSet());
} finally {
try {
InternalResourceManager.getResourceObserver().recoveryFinished(targetRegion);
} catch (Exception e) {
logger.debug(LocalizedMessage.create(LocalizedStrings.PartitionedRegionRebalanceOp_ERROR_IN_RESOURCE_OBSERVER), e);
}
}
}
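The set returned by executeFPA() holds one PartitionRebalanceInfo per rebalanced region. As a minimal sketch (not part of the Geode source; it only uses accessors that also appear in the tests below), a caller could dump those details like this:

import java.util.Set;
import org.apache.geode.cache.partition.PartitionMemberInfo;
import org.apache.geode.cache.partition.PartitionRebalanceInfo;

// Hypothetical helper that prints the per-region and per-member outcome of a rebalance.
static void dumpRebalanceDetails(Set<PartitionRebalanceInfo> detailSet) {
  for (PartitionRebalanceInfo details : detailSet) {
    System.out.println("region=" + details.getRegionPath()
        + " bucketCreates=" + details.getBucketCreatesCompleted()
        + " primaryTransfers=" + details.getPrimaryTransfersCompleted()
        + " bucketTransfers=" + details.getBucketTransfersCompleted());
    for (PartitionMemberInfo member : details.getPartitionMemberDetailsAfter()) {
      System.out.println("  member=" + member.getDistributedMember()
          + " buckets=" + member.getBucketCount()
          + " primaries=" + member.getPrimaryCount());
    }
  }
}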
use of org.apache.geode.cache.partition.PartitionRebalanceInfo in project geode by apache.
the class RebalanceOperationDUnitTest method recoverRedundancyWithOfflinePersistence.
public void recoverRedundancyWithOfflinePersistence(final boolean simulate, final boolean useAccessor) throws Throwable {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
final VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
VM vm3 = host.getVM(3);
SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
public void run() {
Cache cache = getCache();
DiskStoreFactory dsf = cache.createDiskStoreFactory();
DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create(getUniqueName());
AttributesFactory attr = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
paf.setRecoveryDelay(-1);
paf.setStartupRecoveryDelay(-1);
PartitionAttributes prAttr = paf.create();
attr.setPartitionAttributes(prAttr);
attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
attr.setDiskSynchronous(true);
attr.setDiskStoreName(getUniqueName());
cache.createRegion("region1", attr.create());
}
};
// Create the region in only 2 VMs
vm0.invoke(createPrRegion);
vm1.invoke(createPrRegion);
VM rebalanceVM;
SerializableRunnable createAccessor = new SerializableRunnable(("createAccessor")) {
public void run() {
Cache cache = getCache();
DiskStoreFactory dsf = cache.createDiskStoreFactory();
DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create("ds1");
AttributesFactory attr = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
paf.setRecoveryDelay(-1);
paf.setStartupRecoveryDelay(-1);
paf.setLocalMaxMemory(0);
PartitionAttributes prAttr = paf.create();
attr.setPartitionAttributes(prAttr);
cache.createRegion("region1", attr.create());
}
};
if (useAccessor) {
// Create an accessor and rebalance from that VM
vm3.invoke(createAccessor);
rebalanceVM = vm3;
} else {
rebalanceVM = vm0;
}
// Create some buckets
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
region.put(Integer.valueOf(1), "A");
region.put(Integer.valueOf(2), "A");
region.put(Integer.valueOf(3), "A");
region.put(Integer.valueOf(4), "A");
region.put(Integer.valueOf(5), "A");
region.put(Integer.valueOf(6), "A");
}
});
SerializableRunnable closeCache = new SerializableRunnable("close cache") {
public void run() {
Cache cache = getCache();
cache.getRegion("region1").close();
}
};
// Close the cache in vm1
final Set<Integer> vm1Buckets = getBucketList("region1", vm1);
vm1.invoke(closeCache);
SerializableRunnable checkLowRedundancyBeforeRebalance = new SerializableRunnable("checkLowRedundancyBeforeRebalance") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(6, details.getLowRedundancyBucketCount());
}
};
SerializableRunnable checkLowRedundancyAfterRebalance = new SerializableRunnable("checkLowRedundancyAfterRebalance") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
}
};
// make sure we can tell that the buckets have low redundancy
vm0.invoke(checkLowRedundancyBeforeRebalance);
// Now create the cache in another member
vm2.invoke(createPrRegion);
// Make sure we still have low redundancy
vm0.invoke(checkLowRedundancyBeforeRebalance);
/*
* Simulates a rebalance if simulation flag is set. Otherwise, performs a rebalance.
*
* A rebalance will replace offline buckets, so this should restore redundancy
*/
rebalanceVM.invoke(new SerializableRunnable("simulateRebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(6, results.getTotalBucketCreatesCompleted());
assertEquals(3, results.getTotalPrimaryTransfersCompleted());
assertEquals(0, results.getTotalBucketTransfersCompleted());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(1, detailSet.size());
PartitionRebalanceInfo details = detailSet.iterator().next();
assertEquals(6, details.getBucketCreatesCompleted());
assertEquals(3, details.getPrimaryTransfersCompleted());
assertEquals(0, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(2, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
assertEquals(6, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
if (!simulate) {
verifyStats(manager, results);
}
}
});
Set<Integer> vm0Buckets = getBucketList("region1", vm0);
Set<Integer> vm2Buckets = getBucketList("region1", vm2);
// Make sure redundancy is repaired if not simulated
if (!simulate) {
vm0.invoke(checkLowRedundancyAfterRebalance);
} else {
// Otherwise, we should still have broken redundancy at this point
vm0.invoke(checkLowRedundancyBeforeRebalance);
}
vm2.invoke(closeCache);
vm0.invoke(closeCache);
if (useAccessor) {
vm3.invoke(closeCache);
}
// We need to restart both VMs at the same time, because
// they will wait for each other before allowing operations.
AsyncInvocation async0 = vm0.invokeAsync(createPrRegion);
AsyncInvocation async2 = vm2.invokeAsync(createPrRegion);
async0.getResult(30000);
async2.getResult(30000);
if (useAccessor) {
vm3.invoke(createAccessor);
}
// pause for async bucket recovery threads to finish their work. Otherwise
// the rebalance op may think that the other member doesn't have buckets, then
// ask it to create them and get a negative reply because it actually does
// have the buckets, causing the test to fail
Wait.pause(10000);
if (!simulate) {
rebalanceVM.invoke(new SerializableRunnable("rebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(0, results.getTotalBucketCreatesCompleted());
assertEquals(0, results.getTotalBucketTransfersCompleted());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(1, detailSet.size());
PartitionRebalanceInfo details = detailSet.iterator().next();
assertEquals(0, details.getBucketCreatesCompleted());
assertEquals(0, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(2, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
assertEquals(6, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
}
});
// Redundancy should be repaired.
vm0.invoke(checkLowRedundancyAfterRebalance);
}
vm1.invoke(createPrRegion);
// Look at vm0 buckets.
assertEquals(vm0Buckets, getBucketList("region1", vm0));
/*
* Look at vm1 buckets.
*/
if (!simulate) {
/*
* vm1 should have no buckets because offline buckets were recovered when vm0 and vm2 were
* rebalanced above.
*/
assertEquals(0, getBucketList("region1", vm1).size());
} else {
/*
* No rebalancing above because the simulation flag is on. Therefore, vm1 will have recovered
* its buckets. We need to wait for the buckets because they might still be in the middle of
* creation in the background
*/
waitForBucketList("region1", vm1, vm1Buckets);
}
// look at vm2 buckets
assertEquals(vm2Buckets, getBucketList("region1", vm2));
}
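The doRebalance(simulate, manager) helper these tests call is defined elsewhere in RebalanceOperationDUnitTest and is not part of this snippet. A plausible sketch, assuming it is a thin wrapper around the public ResourceManager rebalance API (createRebalanceFactory, simulate/start, getResults), is:

import java.util.concurrent.CancellationException;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.control.ResourceManager;

// Hypothetical helper: runs (or simulates) a rebalance and blocks until the results are available.
private RebalanceResults doRebalance(boolean simulate, ResourceManager manager) {
  RebalanceOperation op = simulate
      ? manager.createRebalanceFactory().simulate()
      : manager.createRebalanceFactory().start();
  try {
    return op.getResults();
  } catch (CancellationException | InterruptedException e) {
    throw new AssertionError("rebalance did not complete", e);
  }
}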
use of org.apache.geode.cache.partition.PartitionRebalanceInfo in project geode by apache.
the class RebalanceOperationDUnitTest method moveBucketsWithRedundancy.
/**
* Test to make sure we balance buckets between three hosts with redundancy
*/
public void moveBucketsWithRedundancy(final boolean simulate) {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
public void run() {
Cache cache = getCache();
AttributesFactory attr = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
paf.setRecoveryDelay(-1);
paf.setStartupRecoveryDelay(-1);
PartitionAttributes prAttr = paf.create();
attr.setPartitionAttributes(prAttr);
cache.createRegion("region1", attr.create());
}
};
// Create the region in two VMs
vm0.invoke(createPrRegion);
vm1.invoke(createPrRegion);
// Create some buckets
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
for (int i = 0; i < 12; i++) {
region.put(Integer.valueOf(i), "A");
}
}
});
// Create the region in one more VM.
vm2.invoke(createPrRegion);
// Now simulate a rebalance
final Long totalSize = (Long) vm0.invoke(new SerializableCallable("simulateRebalance") {
public Object call() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager);
assertEquals(0, results.getTotalBucketCreatesCompleted());
// We don't know how many primaries will move; it depends on
// whether the move bucket code moves the primary or a redundant bucket
// assertIndexDetailsEquals(0, results.getTotalPrimaryTransfersCompleted());
assertEquals(8, results.getTotalBucketTransfersCompleted());
assertTrue(0 < results.getTotalBucketTransferBytes());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(1, detailSet.size());
PartitionRebalanceInfo details = detailSet.iterator().next();
assertEquals(0, details.getBucketCreatesCompleted());
assertTrue(0 < details.getBucketTransferBytes());
assertEquals(8, details.getBucketTransfersCompleted());
long totalSize = 0;
Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
for (PartitionMemberInfo memberDetails : beforeDetails) {
totalSize += memberDetails.getSize();
}
long afterSize = 0;
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(3, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
assertEquals(8, memberDetails.getBucketCount());
assertEquals(4, memberDetails.getPrimaryCount());
afterSize += memberDetails.getSize();
}
assertEquals(totalSize, afterSize);
if (!simulate) {
verifyStats(manager, results);
}
return Long.valueOf(totalSize);
}
});
if (!simulate) {
SerializableRunnable checkBalance = new SerializableRunnable("checkBalance") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(12, details.getCreatedBucketCount());
assertEquals(1, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
long afterSize = 0;
for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
assertEquals(8, memberDetails.getBucketCount());
assertEquals(4, memberDetails.getPrimaryCount());
afterSize += memberDetails.getSize();
}
assertEquals(totalSize.longValue(), afterSize);
}
};
vm0.invoke(checkBalance);
vm1.invoke(checkBalance);
vm2.invoke(checkBalance);
}
}
use of org.apache.geode.cache.partition.PartitionRebalanceInfo in project geode by apache.
the class RebalanceOperationDUnitTest method filterRegions.
/**
* Check that rebalancing with include and exclude filters only rebalances the included regions,
* using two hosts and no redundancy.
*
* @param simulate whether to simulate the rebalance instead of performing it
*/
public void filterRegions(final boolean simulate) {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
final int NUM_REGIONS = 4;
final Set<String> INCLUDED = new HashSet<String>();
INCLUDED.add("region0");
INCLUDED.add("region1");
final Set<String> EXCLUDED = new HashSet<String>();
EXCLUDED.add("region0");
EXCLUDED.add("region3");
final HashSet<String> EXPECTED_REBALANCED = new HashSet<String>();
EXPECTED_REBALANCED.add("/region0");
EXPECTED_REBALANCED.add("/region1");
SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
public void run() {
Cache cache = getCache();
AttributesFactory attr = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(0);
paf.setRecoveryDelay(-1);
paf.setStartupRecoveryDelay(-1);
PartitionAttributes prAttr = paf.create();
attr.setPartitionAttributes(prAttr);
for (int i = 0; i < NUM_REGIONS; i++) {
cache.createRegion("region" + i, attr.create());
}
}
};
// Create the region in only 1 VM
vm0.invoke(createPrRegion);
// Create some buckets
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
Cache cache = getCache();
for (int i = 0; i < NUM_REGIONS; i++) {
Region region = cache.getRegion("region" + i);
for (int j = 0; j < 6; j++) {
region.put(Integer.valueOf(j), "A");
}
}
}
});
// Create the region in the other VM (should have no effect)
vm1.invoke(createPrRegion);
// Now simulate a rebalance
vm0.invoke(new SerializableRunnable("simulateRebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(simulate, manager, INCLUDED, EXCLUDED);
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
// assertIndexDetailsEquals(3, detailSet.size());
Set<String> names = new HashSet<String>();
for (PartitionRebalanceInfo details : detailSet) {
assertEquals(0, details.getBucketCreatesCompleted());
assertEquals(0, details.getPrimaryTransfersCompleted());
assertTrue(0 < details.getBucketTransferBytes());
assertEquals(3, details.getBucketTransfersCompleted());
names.add(details.getRegionPath());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
assertEquals(2, afterDetails.size());
for (PartitionMemberInfo memberDetails : afterDetails) {
assertEquals(3, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
}
assertEquals(EXPECTED_REBALANCED, names);
assertEquals(0, results.getTotalBucketCreatesCompleted());
assertEquals(0, results.getTotalPrimaryTransfersCompleted());
assertEquals(6, results.getTotalBucketTransfersCompleted());
assertTrue(0 < results.getTotalBucketTransferBytes());
if (!simulate) {
verifyStats(manager, results);
}
}
});
if (!simulate) {
SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {
public void run() {
Cache cache = getCache();
for (String name : EXPECTED_REBALANCED) {
Region region = cache.getRegion(name);
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
assertEquals(2, details.getPartitionMemberInfo().size());
for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
assertEquals(3, memberDetails.getBucketCount());
assertEquals(3, memberDetails.getPrimaryCount());
}
}
Region region = cache.getRegion("region2");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(0, details.getLowRedundancyBucketCount());
assertEquals(2, details.getPartitionMemberInfo().size());
for (PartitionMemberInfo memberDetails : details.getPartitionMemberInfo()) {
int bucketCount = memberDetails.getBucketCount();
int primaryCount = memberDetails.getPrimaryCount();
assertTrue("Wrong number of buckets on non rebalanced region buckets=" + bucketCount + " primarys=" + primaryCount, bucketCount == 6 && primaryCount == 6 || bucketCount == 0 && primaryCount == 0);
}
}
};
vm0.invoke(checkRedundancyFixed);
vm1.invoke(checkRedundancyFixed);
}
}
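Similarly, the four-argument doRebalance(simulate, manager, INCLUDED, EXCLUDED) overload is not shown here. Assuming it simply passes the filter sets to the RebalanceFactory via its includeRegions and excludeRegions methods, it might look like:

import java.util.Set;
import java.util.concurrent.CancellationException;
import org.apache.geode.cache.control.RebalanceFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.control.ResourceManager;

// Hypothetical overload: restricts the rebalance to the included regions minus the excluded ones.
private RebalanceResults doRebalance(boolean simulate, ResourceManager manager,
    Set<String> included, Set<String> excluded) {
  RebalanceFactory factory =
      manager.createRebalanceFactory().includeRegions(included).excludeRegions(excluded);
  RebalanceOperation op = simulate ? factory.simulate() : factory.start();
  try {
    return op.getResults();
  } catch (CancellationException | InterruptedException e) {
    throw new AssertionError("rebalance did not complete", e);
  }
}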
use of org.apache.geode.cache.partition.PartitionRebalanceInfo in project geode by apache.
the class RebalanceOperationDUnitTest method testEnforceZoneWithMultipleRegions.
@Test
public void testEnforceZoneWithMultipleRegions() {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
try {
setRedundancyZone(vm0, "A");
setRedundancyZone(vm1, "A");
final DistributedMember zoneBMember = setRedundancyZone(vm2, "B");
SerializableRunnable setRebalanceObserver = new SerializableRunnable("RebalanceObserver") {
@Override
public void run() {
InternalResourceManager.setResourceObserver(new ParallelRecoveryObserver(2));
}
};
SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
public void run() {
ParallelRecoveryObserver ob = (ParallelRecoveryObserver) InternalResourceManager.getResourceObserver();
ob.observeRegion("region1");
ob.observeRegion("region2");
createPR("region1");
createPR("region2");
}
};
vm0.invoke(setRebalanceObserver);
// Create the region in only 1 VM
vm0.invoke(createPrRegion);
// Create some buckets
vm0.invoke(new SerializableRunnable("createSomeBuckets") {
public void run() {
doPuts("region1");
doPuts("region2");
}
});
SerializableRunnable checkLowRedundancy = new SerializableRunnable("checkLowRedundancy") {
public void run() {
Cache cache = getCache();
Region region = cache.getRegion("region1");
PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(6, details.getLowRedundancyBucketCount());
region = cache.getRegion("region2");
details = PartitionRegionHelper.getPartitionRegionInfo(region);
assertEquals(6, details.getCreatedBucketCount());
assertEquals(0, details.getActualRedundantCopies());
assertEquals(6, details.getLowRedundancyBucketCount());
}
};
// make sure we can tell that the buckets have low redundancy
vm0.invoke(checkLowRedundancy);
// Create the region in the other VMs (should have no effect)
vm1.invoke(setRebalanceObserver);
vm1.invoke(createPrRegion);
vm2.invoke(setRebalanceObserver);
vm2.invoke(createPrRegion);
// Make sure we still have low redundancy
vm0.invoke(checkLowRedundancy);
// Now do a rebalance
vm0.invoke(new SerializableRunnable("simulateRebalance") {
public void run() {
Cache cache = getCache();
ResourceManager manager = cache.getResourceManager();
RebalanceResults results = doRebalance(false, manager);
// We expect to satisfy redundancy with the zone B member
assertEquals(12, results.getTotalBucketCreatesCompleted());
// 2 primaries will go to vm2, leaving vm0 and vm1 with 2 primaries each
assertEquals(4, results.getTotalPrimaryTransfersCompleted());
// We actually *will* transfer 3 buckets to the other member in zone A, because that
// improves the balance.
assertEquals(6, results.getTotalBucketTransfersCompleted());
Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
assertEquals(2, detailSet.size());
for (PartitionRebalanceInfo details : detailSet) {
assertEquals(6, details.getBucketCreatesCompleted());
assertEquals(2, details.getPrimaryTransfersCompleted());
assertEquals(3, details.getBucketTransfersCompleted());
Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
for (PartitionMemberInfo info : afterDetails) {
if (info.getDistributedMember().equals(zoneBMember)) {
assertEquals(6, info.getBucketCount());
} else {
assertEquals(3, info.getBucketCount());
}
assertEquals(2, info.getPrimaryCount());
}
}
// assertIndexDetailsEquals(0, details.getBucketTransferBytes());
verifyStats(manager, results);
}
});
vm0.invoke(new SerializableRunnable() {
@Override
public void run() {
assertTrue(((ParallelRecoveryObserver) InternalResourceManager.getResourceObserver()).isObserverCalled());
}
});
checkBucketCount(vm0, "region1", 3);
checkBucketCount(vm1, "region1", 3);
checkBucketCount(vm2, "region1", 6);
checkBucketCount(vm0, "region2", 3);
checkBucketCount(vm1, "region2", 3);
checkBucketCount(vm2, "region2", 6);
} finally {
disconnectFromDS();
Invoke.invokeInEveryVM(new SerializableRunnable() {
public void run() {
// clear the redundancy zone setting
disconnectFromDS();
}
});
}
}
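setRedundancyZone(vm, zone) is another helper defined elsewhere in this test class. A hedged sketch, assuming it connects the member in the given VM with the standard "redundancy-zone" distributed system property and returns its member id, could be:

import java.util.Properties;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.distributed.DistributedSystem;
import org.apache.geode.test.dunit.SerializableCallable;
import org.apache.geode.test.dunit.VM;

// Hypothetical helper: configures the member in the given VM with the named redundancy zone
// and returns its DistributedMember id.
private DistributedMember setRedundancyZone(VM vm, final String zone) {
  return (DistributedMember) vm.invoke(new SerializableCallable("setRedundancyZone") {
    public Object call() {
      Properties props = new Properties();
      props.setProperty("redundancy-zone", zone);
      // getSystem(props) comes from the dunit test base class and (re)connects with these properties.
      DistributedSystem system = getSystem(props);
      return system.getDistributedMember();
    }
  });
}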