Example usage of org.apache.geode.internal.cache.GemFireCacheImpl in the Apache Geode project, taken from the method testMembershipChange of the class RebalanceOperationDUnitTest.
/**
 * Test that the rebalancing operation picks up on a concurrent membership change:
 * a third member is added while the rebalance is in flight, and the operation's
 * "after" details and stats must reflect the new member.
 */
@Test
public void testMembershipChange() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);

  // Creates a partitioned region with no redundancy and recovery disabled, so
  // bucket placement only changes through an explicit rebalance.
  final SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    @Override
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(0);
      paf.setRecoveryDelay(-1);
      paf.setStartupRecoveryDelay(-1);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
    }
  };

  // Create the region in only 1 VM
  vm0.invoke(createPrRegion);

  // Create some buckets
  vm0.invoke(new SerializableRunnable("createSomeBuckets") {
    @Override
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      // Six distinct keys -> six buckets, all initially hosted on vm0.
      for (int key = 1; key <= 6; key++) {
        region.put(Integer.valueOf(key), "A");
      }
    }
  });

  // Create the region in the other VM (should have no effect)
  vm1.invoke(createPrRegion);

  // Now do a rebalance, but start another member in the middle
  vm0.invoke(new SerializableCallable("D rebalance") {
    @Override
    public Object call() throws Exception {
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      InternalResourceManager manager = cache.getInternalResourceManager();
      InternalResourceManager.setResourceObserver(new ResourceObserverAdapter() {
        boolean firstBucket = true;

        @Override
        public void movingBucket(Region region, int bucketId, DistributedMember source,
            DistributedMember target) {
          // On the first bucket move, bring up a third member so the in-flight
          // rebalance observes a membership change.
          if (firstBucket) {
            firstBucket = false;
            vm2.invoke(createPrRegion);
          }
        }
      });
      RebalanceResults results = doRebalance(false, manager);

      // No redundancy is configured, so no bucket creates or primary transfers
      // are expected — only bucket transfers off the originally loaded member.
      assertEquals(0, results.getTotalBucketCreatesCompleted());
      assertEquals(0, results.getTotalPrimaryTransfersCompleted());
      assertEquals(4, results.getTotalBucketTransfersCompleted());
      assertTrue(0 < results.getTotalBucketTransferBytes());

      Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
      assertEquals(1, detailSet.size());
      PartitionRebalanceInfo details = detailSet.iterator().next();
      assertEquals(0, details.getBucketCreatesCompleted());
      assertEquals(0, details.getPrimaryTransfersCompleted());
      assertTrue(0 < details.getBucketTransferBytes());
      assertEquals(4, details.getBucketTransfersCompleted());

      Set<PartitionMemberInfo> beforeDetails = details.getPartitionMemberDetailsBefore();
      // there should have only been 2 members when the rebalancing started.
      assertEquals(2, beforeDetails.size());
      // if it was done, there should now be 3 members.
      Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
      assertEquals(3, afterDetails.size());
      // The six buckets should end up evenly spread: 2 per member, each primary.
      for (PartitionMemberInfo memberDetails : afterDetails) {
        assertEquals(2, memberDetails.getBucketCount());
        assertEquals(2, memberDetails.getPrimaryCount());
      }
      verifyStats(manager, results);

      // The resource manager stats must record exactly one membership change
      // detected during the rebalance. (manager is already an
      // InternalResourceManager — no cast needed.)
      ResourceManagerStats stats = manager.getStats();
      assertEquals(1, stats.getRebalanceMembershipChanges());
      return null;
    }
  });
}
Example usage of org.apache.geode.internal.cache.GemFireCacheImpl in the Apache Geode project, taken from the method setLoadProbe of the class RebalanceOperationDUnitTest.
/**
 * Installs {@code probe} as the load probe on the internal resource manager
 * running inside the given VM, and returns the probe that was previously installed.
 */
private LoadProbe setLoadProbe(VM vm, final LoadProbe probe) {
  return (LoadProbe) vm.invoke(new SerializableCallable("set load probe") {
    @Override
    public Object call() {
      GemFireCacheImpl gemfireCache = (GemFireCacheImpl) getCache();
      return gemfireCache.getInternalResourceManager().setLoadProbe(probe);
    }
  });
}
Example usage of org.apache.geode.internal.cache.GemFireCacheImpl in the Apache Geode project, taken from the method testRevokeAHostBeforeInitialization of the class PersistentRecoveryOrderDUnitTest.
/**
 * Tests to make sure that we can revoke a member before initialization, and that member will stay
 * revoked: after revoking vm1's disk files, vm0 recovers without waiting, and vm1's attempt to
 * rejoin with its revoked data fails with {@link RevokedPersistentDataException}.
 */
@Test
public void testRevokeAHostBeforeInitialization() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);

  LogWriterUtils.getLogWriter().info("Creating region in VM0");
  createPersistentRegion(vm0);
  LogWriterUtils.getLogWriter().info("Creating region in VM1");
  createPersistentRegion(vm1);
  putAnEntry(vm0);

  // With both members up, no region should be waiting on other persistent members.
  vm0.invoke(new SerializableRunnable("Check for waiting regions") {
    @Override
    public void run() {
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      PersistentMemberManager mm = cache.getPersistentMemberManager();
      Map<String, Set<PersistentMemberID>> waitingRegions = mm.getWaitingRegions();
      assertEquals(0, waitingRegions.size());
    }
  });

  LogWriterUtils.getLogWriter().info("closing region in vm0");
  closeRegion(vm0);
  // vm1 now holds the most recent data ("B"), then also goes down.
  updateTheEntry(vm1);
  LogWriterUtils.getLogWriter().info("closing region in vm1");
  closeRegion(vm1);

  final File dirToRevoke = getDiskDirForVM(vm1);
  // From a third member, revoke vm1's persistent files via the admin API.
  vm2.invoke(new SerializableRunnable("Revoke the member") {
    @Override
    public void run() {
      DistributedSystemConfig config;
      AdminDistributedSystem adminDS = null;
      try {
        config = AdminDistributedSystemFactory.defineDistributedSystem(getSystem(), "");
        adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
        adminDS.connect();
        adminDS.revokePersistentMember(InetAddress.getLocalHost(), dirToRevoke.getCanonicalPath());
      } catch (Exception e) {
        Assert.fail("Unexpected exception", e);
      } finally {
        if (adminDS != null) {
          adminDS.disconnect();
        }
      }
    }
  });

  // This shouldn't wait, because we revoked the member
  LogWriterUtils.getLogWriter().info("Creating region in VM0");
  createPersistentRegion(vm0);
  checkForRecoveryStat(vm0, true);

  // Check to make sure we recovered the old
  // value of the entry.
  SerializableRunnable checkForEntry = new SerializableRunnable("check for the entry") {
    @Override
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion(REGION_NAME);
      assertEquals("B", region.get("A"));
    }
  };
  vm0.invoke(checkForEntry);

  // Now, we should not be able to create a region
  // in vm1, because the this member was revoked
  LogWriterUtils.getLogWriter().info("Creating region in VM1");
  IgnoredException e = IgnoredException
      .addIgnoredException(RevokedPersistentDataException.class.getSimpleName(), vm1);
  try {
    createPersistentRegion(vm1);
    fail("We should have received a split distributed system exception");
  } catch (RuntimeException expected) {
    // Only swallow the expected revocation failure; anything else is a real error.
    if (!(expected.getCause() instanceof RevokedPersistentDataException)) {
      throw expected;
    }
  } finally {
    e.remove();
  }
}
Example usage of org.apache.geode.internal.cache.GemFireCacheImpl in the Apache Geode project, taken from the method testLotsOfTombstonesExpiration of the class PersistentRVVRecoveryDUnitTest.
/**
 * Test that we correctly recover and expire recovered tombstones, with compaction enabled.
 *
 * This test differs from above test in that we need to make sure tombstones start expiring based
 * on their original time-stamp, NOT the time-stamp assigned during scheduling for expiration
 * after recovery.
 */
@Ignore
@Test
public void testLotsOfTombstonesExpiration() throws Throwable {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  vm0.invoke(new CacheSerializableRunnable("") {
    @Override
    public void run2() throws CacheException {
      // Save the static tombstone tuning knobs so they can be restored in the
      // finally block — they are shared by all tests in the JVM.
      long savedReplicateTombstoneTimeout = TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT;
      int savedExpiredTombstoneLimit = TombstoneService.EXPIRED_TOMBSTONE_LIMIT;
      try {
        LocalRegion region = createRegion(vm0);
        int initialCount = getTombstoneCount(region);
        assertEquals(0, initialCount);
        final int entryCount = 20;
        for (int i = 0; i < entryCount; i++) {
          region.put(i, new byte[100]);
          // destroy each entry.
          region.destroy(i);
        }
        assertEquals(entryCount, getTombstoneCount(region));
        // roll to a new oplog
        region.getDiskStore().forceRoll();
        // Force a compaction. This should do nothing, because
        // The tombstones are not garbage, so only 50% of the oplog
        // is garbage (the creates).
        region.getDiskStore().forceCompaction();
        assertEquals(0, region.getDiskStore().numCompactableOplogs());
        assertEquals(entryCount, getTombstoneCount(region));
        getCache().close();
        // We should wait for timeout time so that tombstones are expired
        // right away when they are gIId based on their original timestamp.
        Wait.pause((int) TEST_REPLICATED_TOMBSTONE_TIMEOUT);
        TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = TEST_REPLICATED_TOMBSTONE_TIMEOUT;
        TombstoneService.EXPIRED_TOMBSTONE_LIMIT = entryCount;
        // Do region GII
        region = createRegion(vm0);
        assertEquals(entryCount, getTombstoneCount(region));
        getCache().getLogger().fine("Waiting for maximumSleepTime ms");
        // maximumSleepTime+500 in TombstoneSweeper GC thread
        Wait.pause(10000);
        // Tombstones should have been expired and garbage collected by now by
        // TombstoneService.
        assertEquals(0, getTombstoneCount(region));
        // This should compact some oplogs
        region.getDiskStore().forceCompaction();
        assertEquals(0, region.getDiskStore().numCompactableOplogs());
        // Test after restart the tombstones are still missing
        getCache().close();
        region = createRegion(vm0);
        assertEquals(0, getTombstoneCount(region));
        // We should have an oplog available for compaction, because the
        // tombstones
        // were garbage collected
        assertEquals(0, region.getDiskStore().numCompactableOplogs());
        // No cast needed just to close the cache.
        getCache().close();
      } finally {
        TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = savedReplicateTombstoneTimeout;
        TombstoneService.EXPIRED_TOMBSTONE_LIMIT = savedExpiredTombstoneLimit;
      }
    }
  });
}
Example usage of org.apache.geode.internal.cache.GemFireCacheImpl in the Apache Geode project, taken from the method createServerCacheWithHA of the class HABug36738DUnitTest.
/**
 * Creates the server cache and an HA region named {@code HAREGION_NAME} in it,
 * using a KEYS_VALUES mirror type and distributed-ack scope.
 */
private void createServerCacheWithHA() throws Exception {
  cache = CacheFactory.create(getSystem());
  final AttributesFactory attributesFactory = new AttributesFactory();
  attributesFactory.setMirrorType(MirrorType.KEYS_VALUES);
  attributesFactory.setScope(Scope.DISTRIBUTED_ACK);
  haRegion = HARegion.getInstance(HAREGION_NAME, (GemFireCacheImpl) cache, null,
      attributesFactory.createRegionAttributes());
}
Aggregations