Use of org.junit.jupiter.params.provider.ValueSource in project zookeeper by apache.
The class LearnerMetricsTest, method testLearnerMetricsTest.
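All five snippets on this page follow the same JUnit 5 parameterization pattern: @ParameterizedTest marks the method, and @ValueSource(booleans = { true, false }) runs it once per listed value. Before the LearnerMetricsTest code below, here is a minimal, self-contained sketch of that pattern on its own; the class and method names are illustrative, not part of ZooKeeper.

import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class ValueSourceSketchTest {

    // JUnit 5 invokes this method once per value listed in @ValueSource,
    // binding each boolean to the single method parameter.
    @ParameterizedTest
    @ValueSource(booleans = { true, false })
    void runsOncePerValue(boolean flag) {
        // trivial assertion; a real test would branch on the flag, as the
        // ZooKeeper examples below do with asyncSending and similar parameters
        assertTrue(flag || !flag);
    }
}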
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testLearnerMetricsTest(boolean asyncSending) throws Exception {
    Learner.setAsyncSending(asyncSending);
    ServerMetrics.getMetrics().resetAll();
    ClientBase.setupTestEnv();
    final String path = "/zk-testLeanerMetrics";
    final byte[] data = new byte[512];
    final int[] clientPorts = new int[SERVER_COUNT];
    StringBuilder sb = new StringBuilder();
    int observer = 0;
    clientPorts[observer] = PortAssignment.unique();
    sb.append("server." + observer + "=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ":observer\n");
    for (int i = 1; i < SERVER_COUNT; i++) {
        clientPorts[i] = PortAssignment.unique();
        sb.append("server." + i + "=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + "\n");
    }
    // start the three participants
    String quorumCfgSection = sb.toString();
    for (int i = 1; i < SERVER_COUNT; i++) {
        mt[i] = new QuorumPeerTestBase.MainThread(i, clientPorts[i], quorumCfgSection);
        mt[i].start();
    }
    // start the observer
    Map<String, String> observerConfig = new HashMap<>();
    observerConfig.put("peerType", "observer");
    mt[observer] = new QuorumPeerTestBase.MainThread(observer, clientPorts[observer], quorumCfgSection, observerConfig);
    mt[observer].start();
    // connect to the observer node and wait for CONNECTED state
    // (this way we make sure to wait until the leader election finished and the observer node joined as well)
    zk_client = new ZooKeeper("127.0.0.1:" + clientPorts[observer], ClientBase.CONNECTION_TIMEOUT, this);
    waitForOne(zk_client, ZooKeeper.States.CONNECTED);
    // creating a node
    zk_client.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    // there are two proposals by now, one for the global client session creation, one for the create request
    // there are two followers, each received two PROPOSALs
    waitForMetric("learner_proposal_received_count", is(4L));
    waitForMetric("cnt_proposal_latency", is(4L));
    waitForMetric("min_proposal_latency", greaterThanOrEqualTo(0L));
    // the two ACKs are processed by the leader and by each of the two followers
    waitForMetric("cnt_proposal_ack_creation_latency", is(6L));
    waitForMetric("min_proposal_ack_creation_latency", greaterThanOrEqualTo(0L));
    // two COMMITs are received by each of the two followers, and two INFORMs are received by the single observer
    // (the INFORM message is also counted into the "commit_received" metrics)
    waitForMetric("learner_commit_received_count", is(6L));
    waitForMetric("cnt_commit_propagation_latency", is(6L));
    waitForMetric("min_commit_propagation_latency", greaterThanOrEqualTo(0L));
}
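The assertions above rely on a waitForMetric(name, matcher) helper from the test class or its base, which is not shown on this page. A hypothetical polling version is sketched below; currentMetrics() stands in for however the test harness snapshots ServerMetrics into a Map<String, Object>, and the 30-second timeout is an arbitrary choice, so treat the whole method as an assumption rather than ZooKeeper's actual helper.

// Hypothetical sketch only; the real helper lives in ZooKeeper's test code.
private void waitForMetric(String name, org.hamcrest.Matcher<Long> expected) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 30_000;
    while (true) {
        // currentMetrics() is assumed to snapshot ServerMetrics into a Map<String, Object>
        Object raw = currentMetrics().get(name);
        Long value = (raw instanceof Number) ? ((Number) raw).longValue() : null;
        if (value != null && expected.matches(value)) {
            return; // the metric reached the expected value
        }
        if (System.currentTimeMillis() > deadline) {
            org.junit.jupiter.api.Assertions.fail("metric " + name + " never matched " + expected + ", last value: " + raw);
        }
        Thread.sleep(100); // poll until the followers and the observer report the metric
    }
}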
Use of org.junit.jupiter.params.provider.ValueSource in project zookeeper by apache.
The class WatchLeakTest, method testWatchesLeak.
/**
 * Check that if the session has expired then no watch can be set.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testWatchesLeak(boolean sessionTimedout) throws Exception {
    NIOServerCnxnFactory serverCnxnFactory = mock(NIOServerCnxnFactory.class);
    final SelectionKey sk = new FakeSK();
    MockSelectorThread selectorThread = mock(MockSelectorThread.class);
    when(selectorThread.addInterestOpsUpdateRequest(any(SelectionKey.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            SelectionKey sk = (SelectionKey) invocation.getArguments()[0];
            NIOServerCnxn nioSrvCnx = (NIOServerCnxn) sk.attachment();
            sk.interestOps(nioSrvCnx.getInterestOps());
            return true;
        }
    });
    ZKDatabase database = new ZKDatabase(null);
    database.setlastProcessedZxid(2L);
    QuorumPeer quorumPeer = mock(QuorumPeer.class);
    FileTxnSnapLog logfactory = mock(FileTxnSnapLog.class);
    // Directories are not used, but we need them to avoid a NPE
    when(logfactory.getDataDir()).thenReturn(new File(""));
    when(logfactory.getSnapDir()).thenReturn(new File(""));
    FollowerZooKeeperServer fzks = null;
    try {
        // Create a new follower
        fzks = new FollowerZooKeeperServer(logfactory, quorumPeer, database);
        fzks.startup();
        fzks.setServerCnxnFactory(serverCnxnFactory);
        quorumPeer.follower = new MyFollower(quorumPeer, fzks);
        LOG.info("Follower created");
        // Simulate a socket channel between a client and a follower
        final SocketChannel socketChannel = createClientSocketChannel();
        // Create the NIOServerCnxn that will handle the client requests
        final MockNIOServerCnxn nioCnxn = new MockNIOServerCnxn(fzks, socketChannel, sk, serverCnxnFactory, selectorThread);
        sk.attach(nioCnxn);
        // Send the connection request as a client does
        nioCnxn.doIO(sk);
        LOG.info("Client connection sent");
        // Send the valid or invalid session packet to the follower
        QuorumPacket qp = createValidateSessionPacketResponse(!sessionTimedout);
        quorumPeer.follower.processPacket(qp);
        LOG.info("Session validation sent");
        // OK, now the follower knows whether the session is valid or invalid; let's try
        // to send the watches
        nioCnxn.doIO(sk);
        // wait for the request processor to do its job
        Thread.sleep(1000L);
        LOG.info("Watches processed");
        // If the session has not been validated, there must be NO watches
        int watchCount = database.getDataTree().getWatchCount();
        if (sessionTimedout) {
            // Session has not been re-validated!
            LOG.info("session is not valid, watches = {}", watchCount);
            assertEquals(0, watchCount, "Session is not valid so there should be no watches");
        } else {
            // Session has been re-validated
            LOG.info("session is valid, watches = {}", watchCount);
            assertEquals(1, watchCount, "Session is valid so the watch should be there");
        }
    } finally {
        if (fzks != null) {
            fzks.shutdown();
        }
    }
}
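The pivotal step above is createValidateSessionPacketResponse(!sessionTimedout), a helper defined elsewhere in WatchLeakTest. The follower's revalidation path reads a session id and a validity flag from a Leader.REVALIDATE packet and treats them as the leader's verdict on the session. The sketch below is a hedged reconstruction of such a helper, not a copy of the real one: the hard-coded session id is illustrative, whereas the actual test uses the id captured from the client's connection request.

// Hypothetical sketch: build a REVALIDATE response that the follower can process.
private QuorumPacket createValidateSessionPacketResponse(boolean valid) throws IOException {
    long sessionId = 0x1234L; // illustrative; the real test tracks the client's actual session id
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    dos.writeLong(sessionId);  // session being revalidated
    dos.writeBoolean(valid);   // whether the leader considers the session alive
    dos.flush();
    return new QuorumPacket(Leader.REVALIDATE, -1L, baos.toByteArray(), null);
}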
Use of org.junit.jupiter.params.provider.ValueSource in project zookeeper by apache.
The class ReconfigDuringLeaderSyncTest, method testDuringLeaderSync.
/**
 * <pre>
 * Test case for https://issues.apache.org/jira/browse/ZOOKEEPER-2172.
 * Cluster crashes when reconfig a new node as a participant.
 * </pre>
 *
 * This issue occurs when reconfig's PROPOSAL and COMMITANDACTIVATE come in
 * between the snapshot and the UPTODATE. In this case processReconfig was
 * not invoked on the newly added node, and zoo.cfg.dynamic.next wasn't
 * deleted.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testDuringLeaderSync(boolean asyncSending) throws Exception {
    setup(asyncSending);
    final int[] clientPorts = new int[SERVER_COUNT + 1];
    StringBuilder sb = new StringBuilder();
    String[] serverConfig = new String[SERVER_COUNT + 1];
    for (int i = 0; i < SERVER_COUNT; i++) {
        clientPorts[i] = PortAssignment.unique();
        serverConfig[i] = "server." + i + "=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ":participant;127.0.0.1:" + clientPorts[i];
        sb.append(serverConfig[i] + "\n");
    }
    String currentQuorumCfgSection = sb.toString();
    mt = new MainThread[SERVER_COUNT + 1];
    // start 3 servers
    for (int i = 0; i < SERVER_COUNT; i++) {
        mt[i] = new MainThread(i, clientPorts[i], currentQuorumCfgSection, false);
        mt[i].start();
    }
    // ensure all servers started
    for (int i = 0; i < SERVER_COUNT; i++) {
        assertTrue(ClientBase.waitForServerUp("127.0.0.1:" + clientPorts[i], CONNECTION_TIMEOUT), "waiting for server " + i + " being up");
    }
    CountdownWatcher watch = new CountdownWatcher();
    ZooKeeperAdmin preReconfigClient = new ZooKeeperAdmin("127.0.0.1:" + clientPorts[0], ClientBase.CONNECTION_TIMEOUT, watch);
    preReconfigClient.addAuthInfo("digest", "super:test".getBytes());
    watch.waitForConnected(ClientBase.CONNECTION_TIMEOUT);
    // new server joining
    int joinerId = SERVER_COUNT;
    clientPorts[joinerId] = PortAssignment.unique();
    serverConfig[joinerId] = "server." + joinerId + "=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ":participant;127.0.0.1:" + clientPorts[joinerId];
    // Find the leader id.
    int leaderId = -1;
    for (int i = 0; i < SERVER_COUNT; i++) {
        if (mt[i].main.quorumPeer.leader != null) {
            leaderId = i;
            break;
        }
    }
    assertFalse(leaderId == -1);
    // The joiner's initial config consists of itself and the leader.
    sb = new StringBuilder();
    sb.append(serverConfig[leaderId] + "\n").append(serverConfig[joinerId] + "\n");
    /**
     * This server will delay the response to the NEWLEADER message and run the
     * reconfig command, so that the messages at this node are processed in the
     * order below:
     *
     * <pre>
     * NEWLEADER
     * reconfig's PROPOSAL
     * reconfig's COMMITANDACTIVATE
     * UPTODATE
     * </pre>
     */
    mt[joinerId] = new MainThread(joinerId, clientPorts[joinerId], sb.toString(), false) {

        @Override
        public TestQPMain getTestQPMain() {
            return new MockTestQPMain();
        }
    };
    mt[joinerId].start();
    CustomQuorumPeer qp = getCustomQuorumPeer(mt[joinerId]);
    // delete any already existing .next file
    String nextDynamicConfigFilename = qp.getNextDynamicConfigFilename();
    File nextDynaFile = new File(nextDynamicConfigFilename);
    nextDynaFile.delete();
    // Leader.NEWLEADER
    while (true) {
        if (qp.isNewLeaderMessage()) {
            preReconfigClient.reconfigure(serverConfig[joinerId], null, null, -1, null, null);
            break;
        } else {
            // sleep for 10 milliseconds and then check again
            Thread.sleep(10);
        }
    }
    watch = new CountdownWatcher();
    ZooKeeper postReconfigClient = new ZooKeeper("127.0.0.1:" + clientPorts[joinerId], ClientBase.CONNECTION_TIMEOUT, watch);
    watch.waitForConnected(ClientBase.CONNECTION_TIMEOUT);
    // do one successful operation on the newly added node
    postReconfigClient.create("/reconfigIssue", "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    assertFalse(nextDynaFile.exists(), "zoo.cfg.dynamic.next is not deleted.");
    // verify that the joiner has an up-to-date config, including all four servers
    for (long j = 0; j <= SERVER_COUNT; j++) {
        assertNotNull(qp.getQuorumVerifier().getVotingMembers().get(j), "server " + j + " is not present in the new quorum");
    }
    // close clients
    preReconfigClient.close();
    postReconfigClient.close();
}
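MockTestQPMain and CustomQuorumPeer (with its isNewLeaderMessage() flag) are defined elsewhere in ReconfigDuringLeaderSyncTest and are not shown on this page. As a rough illustration of the delaying trick the comment above describes, a quorum peer could hold back the follower's ACK of NEWLEADER just long enough for the reconfig PROPOSAL and COMMITANDACTIVATE to arrive before UPTODATE. The sketch below is hypothetical: the override points (makeFollower, writePacket), the class name, and the fixed sleep are assumptions, not the actual test implementation, and a MockTestQPMain-style class would have to wire such a peer in via its getTestQPMain() hook.

// Hypothetical sketch of a peer that delays the ACK to NEWLEADER so the reconfig
// packets overtake UPTODATE; the real CustomQuorumPeer may hook in differently.
static class DelayingQuorumPeer extends QuorumPeer {

    private volatile boolean newLeaderMessage = false;

    boolean isNewLeaderMessage() {
        return newLeaderMessage;
    }

    @Override
    protected Follower makeFollower(FileTxnSnapLog logFactory) throws IOException {
        return new Follower(this, new FollowerZooKeeperServer(logFactory, this, getZkDb())) {

            @Override
            void writePacket(QuorumPacket pp, boolean flush) throws IOException {
                if (pp != null && pp.getType() == Leader.ACK && !newLeaderMessage) {
                    // the first ACK sent during sync acknowledges NEWLEADER: flag it so the
                    // test can issue the reconfig, then stall before the ACK goes out
                    newLeaderMessage = true;
                    try {
                        Thread.sleep(2000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }
                super.writePacket(pp, flush);
            }
        };
    }
}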
Use of org.junit.jupiter.params.provider.ValueSource in project zookeeper by apache.
The class ObserverMasterTest, method testAdminCommands.
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testAdminCommands(boolean testObserverMaster) throws IOException, MBeanException, InstanceNotFoundException, ReflectionException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, InvalidAttributeValueException, KeeperException {
    // flush all beans, then start
    for (ZKMBeanInfo beanInfo : MBeanRegistry.getInstance().getRegisteredBeans()) {
        MBeanRegistry.getInstance().unregister(beanInfo);
    }
    JMXEnv.setUp();
    setUp(-1, testObserverMaster);
    q3.start();
    assertTrue(ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_OBS, CONNECTION_TIMEOUT), "waiting for observer to be up");
    // Assert that commands are getting forwarded correctly
    zk = new ZooKeeper("127.0.0.1:" + CLIENT_PORT_OBS, ClientBase.CONNECTION_TIMEOUT, this);
    zk.create("/obstest", "test".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    assertEquals(new String(zk.getData("/obstest", null, null)), "test");
    // test stats collection
    final Map<String, String> emptyMap = Collections.emptyMap();
    Map<String, Object> stats = Commands.runCommand("mntr", q3.getQuorumPeer().getActiveServer(), emptyMap).toMap();
    assertTrue(stats.containsKey("observer_master_id"), "observer not emitting observer_master_id");
    // check the stats for the first peer
    if (testObserverMaster) {
        if (q1.getQuorumPeer().leader == null) {
            assertEquals(Integer.valueOf(1), q1.getQuorumPeer().getSynced_observers_metric());
        } else {
            assertEquals(Integer.valueOf(0), q1.getQuorumPeer().getSynced_observers_metric());
        }
    } else {
        if (q1.getQuorumPeer().leader == null) {
            assertNull(q1.getQuorumPeer().getSynced_observers_metric());
        } else {
            assertEquals(Integer.valueOf(1), q1.getQuorumPeer().getSynced_observers_metric());
        }
    }
    // check the stats for the second peer
    if (testObserverMaster) {
        if (q2.getQuorumPeer().leader == null) {
            assertEquals(Integer.valueOf(1), q2.getQuorumPeer().getSynced_observers_metric());
        } else {
            assertEquals(Integer.valueOf(0), q2.getQuorumPeer().getSynced_observers_metric());
        }
    } else {
        if (q2.getQuorumPeer().leader == null) {
            assertNull(q2.getQuorumPeer().getSynced_observers_metric());
        } else {
            assertEquals(Integer.valueOf(1), q2.getQuorumPeer().getSynced_observers_metric());
        }
    }
    // test admin commands for disconnection
    ObjectName connBean = null;
    for (ObjectName bean : JMXEnv.conn().queryNames(new ObjectName(MBeanRegistry.DOMAIN + ":*"), null)) {
        if (bean.getCanonicalName().contains("Learner_Connections") && bean.getCanonicalName().contains("id:" + q3.getQuorumPeer().getId())) {
            connBean = bean;
            break;
        }
    }
    assertNotNull(connBean, "could not find connection bean");
    latch = new CountDownLatch(1);
    JMXEnv.conn().invoke(connBean, "terminateConnection", new Object[0], null);
    assertTrue(latch.await(CONNECTION_TIMEOUT / 2, TimeUnit.MILLISECONDS), "server failed to disconnect on terminate");
    assertTrue(ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_OBS, CONNECTION_TIMEOUT), "waiting for server 3 being up");
    final String obsBeanName = String.format("org.apache.ZooKeeperService:name0=ReplicatedServer_id%d,name1=replica.%d,name2=Observer", q3.getQuorumPeer().getId(), q3.getQuorumPeer().getId());
    Set<ObjectName> names = JMXEnv.conn().queryNames(new ObjectName(obsBeanName), null);
    assertEquals(1, names.size(), "expecting singular observer bean");
    ObjectName obsBean = names.iterator().next();
    if (testObserverMaster) {
        // show we can move the observer using the id
        long observerMasterId = q3.getQuorumPeer().observer.getLearnerMasterId();
        latch = new CountDownLatch(1);
        JMXEnv.conn().setAttribute(obsBean, new Attribute("LearnerMaster", Long.toString(3 - observerMasterId)));
        assertTrue(latch.await(CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS), "server failed to disconnect on terminate");
        assertTrue(ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_OBS, CONNECTION_TIMEOUT), "waiting for server 3 being up");
    } else {
        // show we get an error
        final long leaderId = q1.getQuorumPeer().leader == null ? 2 : 1;
        try {
            JMXEnv.conn().setAttribute(obsBean, new Attribute("LearnerMaster", Long.toString(3 - leaderId)));
            fail("should have seen an exception on previous command");
        } catch (RuntimeMBeanException e) {
            assertEquals(IllegalArgumentException.class, e.getCause().getClass(), "mbean failed for the wrong reason");
        }
    }
    shutdown();
    JMXEnv.tearDown();
}
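Both ObserverMasterTest methods on this page (testAdminCommands above and testRevalidation below) pass the test instance itself as the ZooKeeper Watcher and coordinate through the shared latch field. The callback is defined elsewhere in the class; a hypothetical process(WatchedEvent) consistent with how latch is awaited in these tests is sketched below, counting down on connection state changes. The real implementation may differ.

// Hypothetical sketch of the Watcher callback assumed by the latch.await() calls above and below.
@Override
public void process(WatchedEvent event) {
    Watcher.Event.KeeperState state = event.getState();
    if (state == Watcher.Event.KeeperState.SyncConnected
            || state == Watcher.Event.KeeperState.Disconnected) {
        CountDownLatch l = latch; // the latch field is reassigned between test phases
        if (l != null) {
            l.countDown();
        }
    }
}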
Use of org.junit.jupiter.params.provider.ValueSource in project zookeeper by apache.
The class ObserverMasterTest, method testRevalidation.
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testRevalidation(boolean testObserverMaster) throws Exception {
    setUp(-1, testObserverMaster);
    q3.start();
    assertTrue(ClientBase.waitForServerUp("127.0.0.1:" + CLIENT_PORT_OBS, CONNECTION_TIMEOUT), "waiting for server 3 being up");
    final int leaderProxyPort = PortAssignment.unique();
    final int obsProxyPort = PortAssignment.unique();
    int leaderPort = q1.getQuorumPeer().leader == null ? CLIENT_PORT_QP2 : CLIENT_PORT_QP1;
    PortForwarder leaderPF = new PortForwarder(leaderProxyPort, leaderPort);
    latch = new CountDownLatch(1);
    zk = new ZooKeeper(String.format("127.0.0.1:%d,127.0.0.1:%d", leaderProxyPort, obsProxyPort), ClientBase.CONNECTION_TIMEOUT, this);
    latch.await();
    zk.create("/revalidtest", "test".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    assertNotNull(zk.exists("/revalidtest", null), "Read-after write failed");
    latch = new CountDownLatch(2);
    PortForwarder obsPF = new PortForwarder(obsProxyPort, CLIENT_PORT_OBS);
    try {
        leaderPF.shutdown();
    } catch (Exception e) {
        // ignore?
    }
    latch.await();
    assertEquals(new String(zk.getData("/revalidtest", null, null)), "test");
    obsPF.shutdown();
    shutdown();
}
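testRevalidation simulates the leader becoming unreachable by routing the client through PortForwarder instances and shutting down the leader-side forwarder, so the client has to reconnect through the observer and have its session revalidated there. PortForwarder is a ZooKeeper test utility that is not shown on this page; the sketch below is a deliberately simplified stand-in for the idea (the real class handles concurrent connections and shutdown more carefully), with the loopback address and buffer size chosen arbitrarily.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

// Simplified, illustrative TCP forwarder; not ZooKeeper's org.apache.zookeeper.PortForwarder.
class SimplePortForwarder {

    private final ServerSocket listener;
    private final List<Socket> active = new CopyOnWriteArrayList<>();
    private volatile boolean running = true;

    SimplePortForwarder(int from, int to) throws IOException {
        listener = new ServerSocket(from);
        Thread acceptor = new Thread(() -> {
            while (running) {
                try {
                    Socket client = listener.accept();
                    Socket backend = new Socket("127.0.0.1", to);
                    active.add(client);
                    active.add(backend);
                    // forward bytes in both directions on daemon threads
                    pump(client.getInputStream(), backend.getOutputStream());
                    pump(backend.getInputStream(), client.getOutputStream());
                } catch (IOException e) {
                    // listener closed (shutdown) or connect failed; loop ends once running is false
                }
            }
        }, "simple-port-forwarder-" + from);
        acceptor.setDaemon(true);
        acceptor.start();
    }

    private static void pump(InputStream in, OutputStream out) {
        Thread t = new Thread(() -> {
            byte[] buf = new byte[4096];
            try {
                int n;
                while ((n = in.read(buf)) != -1) {
                    out.write(buf, 0, n);
                    out.flush();
                }
            } catch (IOException ignored) {
                // socket closed; stop forwarding this direction
            }
        });
        t.setDaemon(true);
        t.start();
    }

    // Closing the listener and the in-flight sockets is what makes the client
    // notice the "leader" is gone and fail over to the observer address.
    void shutdown() throws IOException {
        running = false;
        listener.close();
        for (Socket s : active) {
            s.close();
        }
    }
}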