Use of org.apache.geode.cache.control.RebalanceOperation in project geode by apache.
From the class RegionHelper, method rebalanceRegion.
public static RebalanceResults rebalanceRegion(Region region)
    throws CancellationException, InterruptedException {
  // FilterByName only looks at name and not full path
  String regionName = region.getName();
  if (!PartitionRegionHelper.isPartitionedRegion(region)) {
    StringBuilder builder = new StringBuilder();
    builder.append("Region ").append(regionName).append(" is not partitioned. Instead, it is ")
        .append(region.getAttributes().getDataPolicy()).append(". It can't be rebalanced.");
    throw new IllegalArgumentException(builder.toString());
  }
  // Rebalance the region
  ResourceManager resourceManager = region.getCache().getResourceManager();
  RebalanceFactory rebalanceFactory = resourceManager.createRebalanceFactory();
  Set<String> regionsToRebalance = new HashSet<String>();
  regionsToRebalance.add(regionName);
  rebalanceFactory.includeRegions(regionsToRebalance);
  RebalanceOperation rebalanceOperation = rebalanceFactory.start();
  // Return the results
  return rebalanceOperation.getResults();
}
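A minimal calling sketch for the helper above; the surrounding method, the cache variable, and the region name "region1" are assumptions for illustration only:

static void rebalanceOneRegion(Cache cache) throws CancellationException, InterruptedException {
  // "region1" is an illustrative region name, not part of the original snippet
  Region region = cache.getRegion("region1");
  // Blocks until the rebalance of this one region completes
  RebalanceResults results = RegionHelper.rebalanceRegion(region);
  System.out.println("Bucket transfers completed: " + results.getTotalBucketTransfersCompleted());
}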
Use of org.apache.geode.cache.control.RebalanceOperation in project geode by apache.
From the class RegionHelper, method rebalanceCache.
public static RebalanceResults rebalanceCache(GemFireCache cache)
    throws CancellationException, InterruptedException {
  ResourceManager resourceManager = cache.getResourceManager();
  RebalanceFactory rebalanceFactory = resourceManager.createRebalanceFactory();
  RebalanceOperation rebalanceOperation = rebalanceFactory.start();
  return rebalanceOperation.getResults();
}
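The same factory can also do a dry run. A short sketch, assuming the standard RebalanceFactory.simulate() API in addition to the calls shown above; the method name previewRebalance is illustrative:

static RebalanceResults previewRebalance(GemFireCache cache)
    throws CancellationException, InterruptedException {
  // simulate() computes what a rebalance would do without actually moving any data
  RebalanceOperation simulation = cache.getResourceManager().createRebalanceFactory().simulate();
  return simulation.getResults();
}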
Use of org.apache.geode.cache.control.RebalanceOperation in project geode by apache.
From the class PartitionedRegionQueryDUnitTest, method testRebalanceWithIndex.
/**
 * Test of bug 50749.
 * 1. Indexes and buckets are created on several nodes.
 * 2. Buckets are moved.
 * 3. Check to make sure we don't have lingering bucket indexes with bucket regions already
 * destroyed.
 */
@Test
public void testRebalanceWithIndex() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  VM vm3 = host.getVM(3);
  createAccessor(vm0);
  createPR(vm1);
  createPR(vm2);
  createIndex(vm1, "prIndex", "r.score", "/region r");
  // Do Puts
  vm1.invoke(new SerializableRunnable("putting data") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region");
      for (int i = 0; i < 2000; i++) {
        region.put(i, new TestObject(i));
      }
    }
  });
  createPR(vm3);
  // Rebalance
  vm1.invoke(new SerializableRunnable("rebalance") {
    public void run() {
      RebalanceOperation rebalance =
          getCache().getResourceManager().createRebalanceFactory().start();
      // wait for the rebalance
      try {
        rebalance.getResults();
      } catch (CancellationException e) {
        // ignore
      } catch (InterruptedException e) {
        // ignore
      }
    }
  });
  checkForLingeringBucketIndexes(vm1, "prIndex");
  checkForLingeringBucketIndexes(vm2, "prIndex");
  closeCache(vm1, vm2, vm3, vm0);
}
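Instead of discarding the RebalanceResults as the test does, a caller can report the aggregate counters. A small sketch that relies only on the RebalanceResults getters exercised further down this page; the method name logRebalance is illustrative:

static void logRebalance(Cache cache) throws CancellationException, InterruptedException {
  RebalanceOperation rebalance = cache.getResourceManager().createRebalanceFactory().start();
  RebalanceResults results = rebalance.getResults();
  // Aggregate counters across all rebalanced partitioned regions
  System.out.println("bucket creates completed:    " + results.getTotalBucketCreatesCompleted());
  System.out.println("bucket transfers completed:  " + results.getTotalBucketTransfersCompleted());
  System.out.println("primary transfers completed: " + results.getTotalPrimaryTransfersCompleted());
  System.out.println("bucket transfer bytes:       " + results.getTotalBucketTransferBytes());
}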
Use of org.apache.geode.cache.control.RebalanceOperation in project geode by apache.
From the class PartitionedRegionStatsDUnitTest, method testDataStoreEntryCount2WithRebalance.
@Test
public void testDataStoreEntryCount2WithRebalance() throws InterruptedException {
  final Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(0);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
      RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
      try {
        RebalanceResults results = op.getResults();
      } catch (Exception e) {
        Assert.fail("ex", e);
      }
    }
  };
  vm0.invoke(createPrRegion);
  vm0.invoke(new SerializableRunnable("Put some data") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      region.put(Long.valueOf(0), "A");
      region.put(Long.valueOf(1), "A");
      region.put(Long.valueOf(2), "A");
      region.put(Long.valueOf(3), "A");
      region.put(Long.valueOf(4), "A");
      region.put(Long.valueOf(5), "A");
    }
  });
  vm1.invoke(createPrRegion);
  validateEntryCount(vm0, 3);
  validateEntryCount(vm1, 3);
}
Use of org.apache.geode.cache.control.RebalanceOperation in project geode by apache.
From the class RebalanceOperationDUnitTest, method runTestWaitForOperation.
/**
* Test to ensure that we wait for in progress write operations before moving a primary.
*
* @throws Exception
*/
public void runTestWaitForOperation(final Operation op) throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      attr.setCacheLoader(new CacheLoader() {
        public Object load(LoaderHelper helper) throws CacheLoaderException {
          return "anobject";
        }

        public void close() {}
      });
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setRecoveryDelay(-1);
      paf.setStartupRecoveryDelay(-1);
      paf.setLocalMaxMemory(100);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
    }
  };
  // Create a region in this VM with a cache writer
  // and cache loader
  Cache cache = getCache();
  AttributesFactory attr = new AttributesFactory();
  attr.setCacheLoader(new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      return "anobject";
    }

    public void close() {}
  });
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(1);
  paf.setRecoveryDelay(-1);
  paf.setStartupRecoveryDelay(-1);
  paf.setLocalMaxMemory(100);
  PartitionAttributes prAttr = paf.create();
  attr.setPartitionAttributes(prAttr);
  final Region region = cache.createRegion("region1", attr.create());
  // create some buckets
  region.put(Integer.valueOf(1), "A");
  region.put(Integer.valueOf(2), "A");
  BlockingCacheListener cacheWriter = new BlockingCacheListener(2);
  region.getAttributesMutator().addCacheListener(cacheWriter);
  // start two threads doing operations, one on each bucket
  // the threads will block on the cache writer. The rebalance operation
  // will try to move one of these buckets, but it shouldn't
  // be able to because of the in progress operation.
  Thread thread1 = new Thread() {
    public void run() {
      op.execute(region, Integer.valueOf(1));
    }
  };
  thread1.start();
  Thread thread2 = new Thread() {
    public void run() {
      op.execute(region, Integer.valueOf(2));
    }
  };
  thread2.start();
  cacheWriter.waitForOperationsStarted();
  SerializableRunnable checkLowRedundancy = new SerializableRunnable("checkLowRedundancy") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
      assertEquals(2, details.getCreatedBucketCount());
      assertEquals(0, details.getActualRedundantCopies());
      assertEquals(2, details.getLowRedundancyBucketCount());
    }
  };
  // make sure we can tell that the buckets have low redundancy
  checkLowRedundancy.run();
  // Create the region in the other VM (should have no effect)
  vm1.invoke(createPrRegion);
  // Make sure we still have low redundancy
  checkLowRedundancy.run();
  ResourceManager manager = cache.getResourceManager();
  RebalanceOperation rebalance = manager.createRebalanceFactory().start();
  try {
    rebalance.getResults(5, TimeUnit.SECONDS);
    fail("Operation should not have completed");
  } catch (TimeoutException expected) {
    // do nothing
  }
  cacheWriter.release();
  LogWriterUtils.getLogWriter()
      .info("starting wait for rebalance. Will wait for " + MAX_WAIT + " seconds");
  RebalanceResults results = rebalance.getResults(MAX_WAIT, TimeUnit.SECONDS);
  assertEquals(2, results.getTotalBucketCreatesCompleted());
  assertEquals(1, results.getTotalPrimaryTransfersCompleted());
  assertEquals(0, results.getTotalBucketTransferBytes());
  assertEquals(0, results.getTotalBucketTransfersCompleted());
  Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
  assertEquals(1, detailSet.size());
  PartitionRebalanceInfo details = detailSet.iterator().next();
  assertEquals(2, details.getBucketCreatesCompleted());
  assertEquals(1, details.getPrimaryTransfersCompleted());
  assertEquals(0, details.getBucketTransferBytes());
  assertEquals(0, details.getBucketTransfersCompleted());
  Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
  assertEquals(2, afterDetails.size());
  for (PartitionMemberInfo memberDetails : afterDetails) {
    assertEquals(2, memberDetails.getBucketCount());
    assertEquals(1, memberDetails.getPrimaryCount());
  }
  SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
      assertEquals(2, details.getCreatedBucketCount());
      assertEquals(1, details.getActualRedundantCopies());
      assertEquals(0, details.getLowRedundancyBucketCount());
    }
  };
  checkRedundancyFixed.run();
  vm1.invoke(checkRedundancyFixed);
}
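Taken together, the snippets follow one lifecycle: create the factory, start (or simulate) the operation, then wait for the results, optionally with a timeout. A condensed sketch of that pattern; the five-minute timeout and the method name rebalanceWithTimeout are illustrative assumptions, and cancel() is the standard RebalanceOperation call for abandoning a run:

static RebalanceResults rebalanceWithTimeout(Cache cache)
    throws CancellationException, InterruptedException {
  RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
  try {
    // Wait up to five minutes (illustrative timeout) for the rebalance to finish
    return op.getResults(5, TimeUnit.MINUTES);
  } catch (TimeoutException e) {
    // Give up and cancel the still-running operation
    op.cancel();
    throw new CancellationException("Rebalance did not finish within 5 minutes");
  }
}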