Use of org.apache.geode.distributed.internal.InternalDistributedSystem in the Apache Geode project.
From the class ReconnectDUnitTest, method addReconnectListener:
void addReconnectListener() {
  // Start from a clean slate: this counter is incremented by the listener below.
  reconnectTries = 0;
  LogWriterUtils.getLogWriter().info("adding reconnect listener");
  ReconnectListener listener = new ReconnectListener() {
    @Override
    public void reconnecting(InternalDistributedSystem oldSys) {
      LogWriterUtils.getLogWriter().info("reconnect listener invoked");
      reconnectTries++;
    }

    @Override
    public void onReconnect(InternalDistributedSystem system1, InternalDistributedSystem system2) {
      // no-op: this test only counts reconnect attempts
    }
  };
  InternalDistributedSystem.addReconnectListener(listener);
}
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in the Apache Geode project.
From the class ReconnectedCacheServerDUnitTest, method testDefaultCacheServerNotCreatedOnReconnect:
@Test
public void testDefaultCacheServerNotCreatedOnReconnect() {
  // Precondition: the cache-xml-file auto-reconnect option must be off for this test.
  assertFalse(
      Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "autoReconnect-useCacheXMLFile"));
  GemFireCacheImpl gemfireCache = (GemFireCacheImpl) this.cache;
  // fool the system into thinking cluster-config is being used
  GMSMembershipManager membershipManager = (GMSMembershipManager) MembershipManagerHelper
      .getMembershipManager(gemfireCache.getDistributedSystem());
  membershipManager.saveCacheXmlForReconnect(/* sharedConfigEnabled */ true);
  // the cache server config should now be stored in the cache's config
  assertFalse(gemfireCache.getCacheServers().isEmpty());
  int serverCountBefore = gemfireCache.getCacheServers().size();
  assertNotNull(gemfireCache.getCacheConfig().getCacheServerCreation());
  InternalDistributedSystem system = gemfireCache.getInternalDistributedSystem();
  system.createAndStartCacheServers(gemfireCache.getCacheConfig().getCacheServerCreation(),
      gemfireCache);
  // Restarting the stored cache servers must not create an extra default server.
  assertEquals("found these cache servers:" + gemfireCache.getCacheServers(), serverCountBefore,
      gemfireCache.getCacheServers().size());
}
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in the Apache Geode project.
From the class RegionMembershipListenerDUnitTest, method crashCacheOtherVm:
private void crashCacheOtherVm() {
  VM otherVm = getOtherVm();
  otherVm.invoke(new CacheSerializableRunnable("crash cache") {
    @Override
    public void run2() throws CacheException {
      // shut down the gms before the distributed system to simulate
      // a crash. In post-5.1.x, this could use SystemFailure.initFailure()
      GemFireCacheImpl gemfireCache = (GemFireCacheImpl) getCache();
      InternalDistributedSystem system =
          (InternalDistributedSystem) gemfireCache.getDistributedSystem();
      MembershipManagerHelper.crashDistributedSystem(system);
    }
  });
}
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in the Apache Geode project.
From the class CacheAdvisorDUnitTest, method getDistributionManagerId:
/**
 * Accessed via reflection. DO NOT REMOVE
 *
 * @return the member id of this VM's distribution manager
 */
protected InternalDistributedMember getDistributionManagerId() {
  DistributedSystem ds = getCache().getDistributedSystem();
  return ((InternalDistributedSystem) ds).getDistributionManager().getId();
}
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in the Apache Geode project.
From the class PersistentColocatedPartitionedRegionDUnitTest, method replaceOfflineMemberAndRestart:
/**
 * Test for support issue 7870.
 * <ol>
 * <li>Run three members with redundancy 1 and recovery delay 0.
 * <li>Kill one of the members, to trigger replacement of buckets.
 * <li>Shutdown all members and restart.
 * </ol>
 *
 * What was happening is that in the parent PR, we discarded our offline data in one member, but
 * in the child PR the other members ended up waiting for the child bucket to be created in the
 * member that discarded its offline data.
 *
 * @param createPRs runnable that creates the colocated regions in a member
 * @throws Throwable if any async invocation fails
 */
public void replaceOfflineMemberAndRestart(SerializableRunnable createPRs) throws Throwable {
  disconnectAllFromDS();
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);
  // Create the PR on three members
  for (VM member : new VM[] {vm0, vm1, vm2}) {
    member.invoke(createPRs);
  }
  // Create some buckets.
  createData(vm0, 0, NUM_BUCKETS, "a");
  createData(vm0, 0, NUM_BUCKETS, "a", "region2");
  // Close one of the members to trigger redundancy recovery.
  closeCache(vm2);
  // Wait until redundancy is recovered.
  waitForRedundancyRecovery(vm0, 1, PR_REGION_NAME);
  waitForRedundancyRecovery(vm0, 1, "region2");
  createData(vm0, 0, NUM_BUCKETS, "b");
  createData(vm0, 0, NUM_BUCKETS, "b", "region2");
  IgnoredException expected = IgnoredException.addIgnoredException("PartitionOfflineException");
  try {
    // Close the remaining members.
    vm0.invoke(new SerializableCallable() {
      @Override
      public Object call() throws Exception {
        InternalDistributedSystem ds =
            (InternalDistributedSystem) getCache().getDistributedSystem();
        AdminDistributedSystemImpl.shutDownAllMembers(ds.getDistributionManager(), 600000);
        return null;
      }
    });
    // Make sure that vm-1 is completely disconnected
    // The shutdown all asynchronously finishes the disconnect after
    // replying to the admin member.
    vm1.invoke(new SerializableRunnable() {
      @Override
      public void run() {
        basicGetSystem().disconnect();
      }
    });
    // Recreate the members. Try to make sure that
    // the member with the latest copy of the buckets
    // is the one that decides to throw away its copy
    // by starting it last.
    AsyncInvocation async0 = vm0.invokeAsync(createPRs);
    AsyncInvocation async1 = vm1.invokeAsync(createPRs);
    Wait.pause(2000);
    AsyncInvocation async2 = vm2.invokeAsync(createPRs);
    async0.getResult(MAX_WAIT);
    async1.getResult(MAX_WAIT);
    async2.getResult(MAX_WAIT);
    checkData(vm0, 0, NUM_BUCKETS, "b");
    checkData(vm0, 0, NUM_BUCKETS, "b", "region2");
    // Redundancy must come back in every member, for both colocated regions.
    for (VM member : new VM[] {vm0, vm1, vm2}) {
      waitForRedundancyRecovery(member, 1, PR_REGION_NAME);
      waitForRedundancyRecovery(member, 1, "region2");
    }
    // Make sure we don't have any extra buckets after the restart
    int parentBuckets = getBucketList(vm0).size() + getBucketList(vm1).size()
        + getBucketList(vm2).size();
    assertEquals(2 * NUM_BUCKETS, parentBuckets);
    int childBuckets = getBucketList(vm0, "region2").size() + getBucketList(vm1, "region2").size()
        + getBucketList(vm2, "region2").size();
    assertEquals(2 * NUM_BUCKETS, childBuckets);
  } finally {
    expected.remove();
  }
}
Aggregations