Use of org.apache.geode.distributed.internal.membership.NetView in project geode by apache.
From the class GMSJoinLeaveJUnitTest, method testNoViewAckCausesRemovalMessage:
@Test
public void testNoViewAckCausesRemovalMessage() throws Exception {
  initMocks(true);
  when(healthMonitor.checkIfAvailable(isA(InternalDistributedMember.class), isA(String.class),
      isA(Boolean.class))).thenReturn(false);
  prepareAndInstallView(mockMembers[0], createMemberList(mockMembers[0], gmsJoinLeaveMemberId));
  NetView oldView = gmsJoinLeave.getView();
  NetView newView = new NetView(oldView, oldView.getViewId() + 1);
  // the new view will remove the old coordinator (normal shutdown) and add a new member
  // who will not ack the view. This should cause it to be removed from the system
  // with a RemoveMemberMessage
  newView.add(mockMembers[2]);
  newView.remove(mockMembers[0]);
  InstallViewMessage installViewMessage = getInstallViewMessage(newView, credentials, false);
  gmsJoinLeave.processMessage(installViewMessage);
  // this test's member-timeout * 3
  Awaitility.await().atMost(6, SECONDS)
      .until(() -> gmsJoinLeave.getView().getViewId() != oldView.getViewId());
  assertTrue(gmsJoinLeave.isCoordinator());
  // wait for suspect processing
  verify(healthMonitor, timeout(10000).atLeast(1)).checkIfAvailable(isA(DistributedMember.class),
      isA(String.class), isA(Boolean.class));
  // verify(messenger, atLeast(1)).send(isA(RemoveMemberMessage.class));
}
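The test leans on a few small helpers from GMSJoinLeaveJUnitTest. As a rough sketch (an assumption about how such a helper could look, not a quote of the actual one), createMemberList simply wraps the given members in a mutable list that prepareAndInstallView can use as the initial membership:

// Hypothetical sketch of the createMemberList helper used above.
private List<InternalDistributedMember> createMemberList(InternalDistributedMember... members) {
  List<InternalDistributedMember> memberList = new ArrayList<>(members.length);
  for (InternalDistributedMember member : members) {
    memberList.add(member);
  }
  return memberList;
}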
Use of org.apache.geode.distributed.internal.membership.NetView in project geode by apache.
From the class GMSQuorumCheckerJUnitTest, method testQuorumCheckerMajorityRespond:
@Test
public void testQuorumCheckerMajorityRespond() throws Exception {
  NetView view = prepareView();
  Set<Integer> pongResponders = new HashSet<Integer>();
  for (int i = 0; i < mockMembers.length - 1; i++) {
    pongResponders.add(mockMembers[i].getPort());
  }
  PingMessageAnswer answerer = new PingMessageAnswer(channel, pongResponders);
  Mockito.doAnswer(answerer).when(channel).send(any(Message.class));
  GMSQuorumChecker qc = new GMSQuorumChecker(view, 51, channel);
  qc.initialize();
  boolean quorum = qc.checkForQuorum(500);
  assertTrue(quorum);
  assertEquals(view.getMembers().size(), answerer.getPingCount());
}
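All of the quorum-checker tests build their membership view the same way. A plausible sketch of the shared prepareView() helper, assuming mockMembers is an array of InternalDistributedMember mocks with distinct ports and assuming NetView's (creator, viewId, members) constructor, is:

// Hypothetical sketch of the prepareView() helper shared by these tests.
private NetView prepareView() {
  List<InternalDistributedMember> members = new ArrayList<>(Arrays.asList(mockMembers));
  return new NetView(mockMembers[0], 1, members);
}

The 51 passed to the GMSQuorumChecker constructor is the quorum threshold, expressed as a percentage of the old view's weight, so with equally weighted members a majority of responders clears it here while the lone responder in testQuorumCheckerNotEnoughWeightForQuorum below does not.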
Use of org.apache.geode.distributed.internal.membership.NetView in project geode by apache.
From the class GMSQuorumCheckerJUnitTest, method testQuorumCheckerAllRespond:
@Test
public void testQuorumCheckerAllRespond() throws Exception {
  NetView view = prepareView();
  Set<Integer> pongResponders = new HashSet<Integer>();
  for (int i = 0; i < mockMembers.length; i++) {
    pongResponders.add(mockMembers[i].getPort());
  }
  PingMessageAnswer answerer = new PingMessageAnswer(channel, pongResponders);
  Mockito.doAnswer(answerer).when(channel).send(any(Message.class));
  GMSQuorumChecker qc = new GMSQuorumChecker(view, 51, channel);
  qc.initialize();
  boolean quorum = qc.checkForQuorum(500);
  assertTrue(quorum);
  assertEquals(view.getMembers().size(), answerer.getPingCount());
  assertTrue(qc.checkForQuorum(500));
  assertSame(qc.getMembershipInfo(), channel);
}
Use of org.apache.geode.distributed.internal.membership.NetView in project geode by apache.
From the class GMSQuorumCheckerJUnitTest, method testQuorumCheckerNotEnoughWeightForQuorum:
@Test
public void testQuorumCheckerNotEnoughWeightForQuorum() throws Exception {
  NetView view = prepareView();
  Set<Integer> pongResponders = new HashSet<Integer>();
  pongResponders.add(mockMembers[0].getPort());
  PingMessageAnswer answerer = new PingMessageAnswer(channel, pongResponders);
  Mockito.doAnswer(answerer).when(channel).send(any(Message.class));
  GMSQuorumChecker qc = new GMSQuorumChecker(view, 51, channel);
  qc.initialize();
  boolean quorum = qc.checkForQuorum(500);
  assertFalse(quorum);
  assertEquals(view.getMembers().size(), answerer.getPingCount());
}
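How the responses are simulated: the PingMessageAnswer registered via Mockito.doAnswer intercepts every channel.send(...) call that GMSQuorumChecker makes. The following is only a structural sketch under stated assumptions (a Mockito Answer over an org.jgroups JChannel/Message, with the pong-delivery step elided), not the actual inner class from GMSQuorumCheckerJUnitTest:

// Rough, hypothetical sketch of a PingMessageAnswer-style Mockito Answer.
class PingMessageAnswer implements Answer<Object> {
  private final JChannel channel; // kept so pongs could be fed back through the mocked channel
  private final Set<Integer> pongResponders;
  private int pingCount;

  PingMessageAnswer(JChannel channel, Set<Integer> pongResponders) {
    this.channel = channel;
    this.pongResponders = pongResponders;
  }

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    Message ping = (Message) invocation.getArguments()[0];
    pingCount++; // the quorum checker sends one ping per member in the view
    // If the pinged member's port is in pongResponders, deliver a pong back to the
    // quorum checker (delivery mechanics depend on GMSQuorumChecker internals, elided here).
    return null;
  }

  int getPingCount() {
    return pingCount;
  }
}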
Use of org.apache.geode.distributed.internal.membership.NetView in project geode by apache.
From the class LocatorDUnitTest, method testCollocatedLocatorWithSecurity:
//////// Test Methods
/**
 * This tests that the locator can resume control as coordinator after all locators have been shut
 * down and one is restarted. It's necessary to have a lock service start so elder failover is
 * forced to happen. Prior to fixing how this worked it hung with the restarted locator trying to
 * become elder again because it put its address at the beginning of the new view it sent out.
 */
@Test
public void testCollocatedLocatorWithSecurity() throws Exception {
  disconnectAllFromDS();
  Host host = Host.getHost(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  VM vm3 = host.getVM(3);
  port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  DistributedTestUtils.deleteLocatorStateFile(port1);
  final String locators = NetworkUtils.getServerHostName(host) + "[" + port1 + "]";
  final Properties properties = new Properties();
  properties.put(MCAST_PORT, "0");
  properties.put(START_LOCATOR, locators);
  properties.put(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
  properties.put(SECURITY_PEER_AUTH_INIT, "org.apache.geode.distributed.AuthInitializer.create");
  properties.put(SECURITY_PEER_AUTHENTICATOR,
      "org.apache.geode.distributed.MyAuthenticator.create");
  properties.put(ENABLE_CLUSTER_CONFIGURATION, "false");
  properties.put(USE_CLUSTER_CONFIGURATION, "false");
  addDSProps(properties);
  system = (InternalDistributedSystem) DistributedSystem.connect(properties);
  InternalDistributedMember mbr = system.getDistributedMember();
  assertEquals("expected the VM to have NORMAL vmKind", DistributionManager.NORMAL_DM_TYPE,
      system.getDistributedMember().getVmKind());
  properties.remove(START_LOCATOR);
  properties.put(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
  properties.put(LOCATORS, locators);
  SerializableRunnable startSystem = new SerializableRunnable("start system") {
    public void run() {
      system = (InternalDistributedSystem) DistributedSystem.connect(properties);
    }
  };
  vm1.invoke(startSystem);
  vm2.invoke(startSystem);
  // ensure that I, as a collocated locator owner, can create a cache region
  Cache cache = CacheFactory.create(system);
  Region r = cache.createRegionFactory(RegionShortcut.REPLICATE).create("test-region");
  assertNotNull("expected to create a region", r);
  // create a lock service and have every vm get a lock
  DistributedLockService service = DistributedLockService.create("test service", system);
  service.becomeLockGrantor();
  service.lock("foo0", 0, 0);
  vm1.invoke("get the lock service and lock something",
      () -> DistributedLockService.create("test service", system).lock("foo1", 0, 0));
  vm2.invoke("get the lock service and lock something",
      () -> DistributedLockService.create("test service", system).lock("foo2", 0, 0));
  // cause elder failover. vm1 will become the lock grantor
  system.disconnect();
  try {
    vm1.invoke("ensure grantor failover", () -> {
      DistributedLockService serviceNamed = DistributedLockService.getServiceNamed("test service");
      serviceNamed.lock("foo3", 0, 0);
      Awaitility.waitAtMost(10000, TimeUnit.MILLISECONDS).pollInterval(200, TimeUnit.MILLISECONDS)
          .until(() -> serviceNamed.isLockGrantor());
      assertTrue(serviceNamed.isLockGrantor());
    });
    properties.put(START_LOCATOR, locators);
    properties.put(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
    system = (InternalDistributedSystem) DistributedSystem.connect(properties);
    System.out.println("done connecting distributed system. Membership view is "
        + MembershipManagerHelper.getMembershipManager(system).getView());
    assertEquals("should be the coordinator", system.getDistributedMember(),
        MembershipManagerHelper.getCoordinator(system));
    NetView view = MembershipManagerHelper.getMembershipManager(system).getView();
    org.apache.geode.test.dunit.LogWriterUtils.getLogWriter()
        .info("view after becoming coordinator is " + view);
    assertNotSame("should not be the first member in the view (" + view + ")",
        system.getDistributedMember(), view.get(0));
    service = DistributedLockService.create("test service", system);
    // now force a non-elder VM to get a lock. This will hang if the bug is not fixed
    vm2.invoke("get the lock service and lock something", () -> {
      DistributedLockService.getServiceNamed("test service").lock("foo4", 0, 0);
    });
    assertFalse("should not have become lock grantor", service.isLockGrantor());
    // Now demonstrate that a new member can join and use the lock service
    properties.remove(START_LOCATOR);
    vm3.invoke(startSystem);
    vm3.invoke("get the lock service and lock something(2)",
        () -> DistributedLockService.create("test service", system).lock("foo5", 0, 0));
  } finally {
    disconnectAllFromDS();
  }
}
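The SECURITY_PEER_AUTH_INIT and SECURITY_PEER_AUTHENTICATOR properties above point at static create() factory methods for the test's peer-security callbacks. The classes below are only an illustrative, accept-everything sketch of what such callbacks could look like against Geode's legacy AuthInitialize/Authenticator interfaces; they are assumptions for illustration, not the actual AuthInitializer and MyAuthenticator classes used by LocatorDUnitTest:

// Hypothetical peer-security callbacks; an accept-all sketch for illustration only.
class AuthInitializer implements AuthInitialize {
  public static AuthInitialize create() {
    return new AuthInitializer();
  }

  public void init(LogWriter systemLogger, LogWriter securityLogger) {}

  public Properties getCredentials(Properties props, DistributedMember server, boolean isPeer) {
    return new Properties(); // supply whatever the peer authenticator expects; empty here
  }

  public void close() {}
}

class MyAuthenticator implements Authenticator {
  public static Authenticator create() {
    return new MyAuthenticator();
  }

  public void init(Properties securityProps, LogWriter systemLogger, LogWriter securityLogger) {}

  public Principal authenticate(Properties props, DistributedMember member) {
    return () -> "peer"; // accept every peer; a real authenticator would validate props
  }

  public void close() {}
}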