Usage of org.apache.zookeeper.admin.ZooKeeperAdmin in the Apache ZooKeeper project: class ReconfigTest, method testJMXBeanAfterRoleChange.
/**
 * Verifies the JMX attributes of the local and remote peer beans before and
 * after a participant is reconfigured into the observer role.
 *
 * <p>Flow: start a 3-server ensemble, check the remote peer beans that
 * servers 2 and 3 expose for server 1, reconfig server 1 from participant
 * to observer, then re-check server 1's local bean and the remote beans.
 *
 * @throws Exception on any client, reconfig, or JMX failure
 */
@Test
public void testJMXBeanAfterRoleChange() throws Exception {
    // create 3 servers
    qu = new QuorumUtil(1);
    qu.disableJMXTest = true;
    qu.startAll();
    ZooKeeper[] zkArr = createHandles(qu);
    ZooKeeperAdmin[] zkAdminArr = createAdminHandles(qu);
    // changing a server's role / port is done by "adding" it with the same
    // id but different role / port
    List<String> joiningServers = new ArrayList<String>();
    // assert remotePeerBean.1 of ReplicatedServer_2
    int changingIndex = 1;
    int replica2 = 2;
    QuorumPeer peer2 = qu.getPeer(replica2).peer;
    // Long.valueOf replaces the deprecated boxing constructor new Long(...)
    QuorumServer changingQS2 = peer2.getView().get(Long.valueOf(changingIndex));
    String remotePeerBean2 = CommonNames.DOMAIN + ":name0=ReplicatedServer_id" + replica2 + ",name1=replica." + changingIndex;
    assertRemotePeerMXBeanAttributes(changingQS2, remotePeerBean2);
    // assert remotePeerBean.1 of ReplicatedServer_3
    int replica3 = 3;
    QuorumPeer peer3 = qu.getPeer(replica3).peer;
    QuorumServer changingQS3 = peer3.getView().get(Long.valueOf(changingIndex));
    String remotePeerBean3 = CommonNames.DOMAIN + ":name0=ReplicatedServer_id" + replica3 + ",name1=replica." + changingIndex;
    assertRemotePeerMXBeanAttributes(changingQS3, remotePeerBean3);
    String newRole = "observer";
    ZooKeeper zk = zkArr[changingIndex];
    ZooKeeperAdmin zkAdmin = zkAdminArr[changingIndex];
    // exactly as it is now, except for role change
    joiningServers.add("server." + changingIndex + "=127.0.0.1:" + qu.getPeer(changingIndex).peer.getQuorumAddress().getPort() + ":" + qu.getPeer(changingIndex).peer.getElectionAddress().getPort() + ":" + newRole + ";127.0.0.1:" + qu.getPeer(changingIndex).peer.getClientPort());
    reconfig(zkAdmin, joiningServers, null, null, -1);
    testNormalOperation(zkArr[changingIndex], zk);
    // After the reconfig the peer must run as an observer only: the observer
    // reference is set and the follower/leader references are cleared.
    Assert.assertTrue(qu.getPeer(changingIndex).peer.observer != null && qu.getPeer(changingIndex).peer.follower == null && qu.getPeer(changingIndex).peer.leader == null);
    Assert.assertTrue(qu.getPeer(changingIndex).peer.getPeerState() == ServerState.OBSERVING);
    QuorumPeer qp = qu.getPeer(changingIndex).peer;
    String localPeerBeanName = CommonNames.DOMAIN + ":name0=ReplicatedServer_id" + changingIndex + ",name1=replica." + changingIndex;
    // assert localPeerBean.1 of ReplicatedServer_1
    assertLocalPeerMXBeanAttributes(qp, localPeerBeanName, true);
    // assert remotePeerBean.1 of ReplicatedServer_2 (refreshed view after reconfig)
    changingQS2 = peer2.getView().get(Long.valueOf(changingIndex));
    assertRemotePeerMXBeanAttributes(changingQS2, remotePeerBean2);
    // assert remotePeerBean.1 of ReplicatedServer_3 (refreshed view after reconfig)
    changingQS3 = peer3.getView().get(Long.valueOf(changingIndex));
    assertRemotePeerMXBeanAttributes(changingQS3, remotePeerBean3);
    closeAllHandles(zkArr, zkAdminArr);
}
Usage of org.apache.zookeeper.admin.ZooKeeperAdmin in the Apache ZooKeeper project: class StandaloneDisabledTest, method startServer.
/**
 * Starts one server of the replicated ensemble, waits for it to come up as a
 * quorum member (not standalone), and creates its client and admin handles.
 *
 * @param id     index of the server within the ensemble arrays
 * @param config quorum configuration passed to the server's main thread
 * @throws Exception if the server fails to start or the client cannot connect
 */
private void startServer(int id, String config) throws Exception {
    final String hostPort = "127.0.0.1:" + clientPorts[id];
    final MainThread peer = new MainThread(id, clientPorts[id], config);
    peers[id] = peer;
    peer.start();
    Assert.assertTrue("Server " + id + " is not up", ClientBase.waitForServerUp(hostPort, CONNECTION_TIMEOUT));
    // A replicated-mode test must never see the server fall back to standalone.
    Assert.assertTrue("Error- Server started in Standalone Mode!", peer.isQuorumPeerRunning());
    zkHandles[id] = ClientBase.createZKClient(hostPort);
    zkAdminHandles[id] = new ZooKeeperAdmin(hostPort, CONNECTION_TIMEOUT, this);
    zkAdminHandles[id].addAuthInfo("digest", "super:test".getBytes());
}
Usage of org.apache.zookeeper.admin.ZooKeeperAdmin in the Apache ZooKeeper project: class ReconfigDuringLeaderSyncTest, method testDuringLeaderSync.
/**
 * <pre>
 * Test case for https://issues.apache.org/jira/browse/ZOOKEEPER-2172.
 * Cluster crashes when reconfig a new node as a participant.
 * </pre>
 *
 * This issue occurs when reconfig's PROPOSAL and COMMITANDACTIVATE come in
 * between the snapshot and the UPTODATE. In this case processReconfig was
 * not invoked on the newly added node, and zoo.cfg.dynamic.next wasn't
 * deleted.
 *
 * @throws Exception on any client, reconfig, or server-start failure
 */
@Test
public void testDuringLeaderSync() throws Exception {
    final int[] clientPorts = new int[SERVER_COUNT + 1];
    StringBuilder sb = new StringBuilder();
    String[] serverConfig = new String[SERVER_COUNT + 1];
    for (int i = 0; i < SERVER_COUNT; i++) {
        clientPorts[i] = PortAssignment.unique();
        serverConfig[i] = "server." + i + "=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ":participant;127.0.0.1:" + clientPorts[i];
        // chained append avoids building a throwaway concatenated String
        sb.append(serverConfig[i]).append("\n");
    }
    String currentQuorumCfgSection = sb.toString();
    mt = new MainThread[SERVER_COUNT + 1];
    // start 3 servers
    for (int i = 0; i < SERVER_COUNT; i++) {
        mt[i] = new MainThread(i, clientPorts[i], currentQuorumCfgSection, false);
        mt[i].start();
    }
    // ensure all servers started
    for (int i = 0; i < SERVER_COUNT; i++) {
        Assert.assertTrue("waiting for server " + i + " being up", ClientBase.waitForServerUp("127.0.0.1:" + clientPorts[i], CONNECTION_TIMEOUT));
    }
    CountdownWatcher watch = new CountdownWatcher();
    ZooKeeperAdmin preReconfigClient = new ZooKeeperAdmin("127.0.0.1:" + clientPorts[0], ClientBase.CONNECTION_TIMEOUT, watch);
    preReconfigClient.addAuthInfo("digest", "super:test".getBytes());
    watch.waitForConnected(ClientBase.CONNECTION_TIMEOUT);
    // new server joining
    int joinerId = SERVER_COUNT;
    clientPorts[joinerId] = PortAssignment.unique();
    serverConfig[joinerId] = "server." + joinerId + "=127.0.0.1:" + PortAssignment.unique() + ":" + PortAssignment.unique() + ":participant;127.0.0.1:" + clientPorts[joinerId];
    // Find leader id.
    int leaderId = -1;
    for (int i = 0; i < SERVER_COUNT; i++) {
        if (mt[i].main.quorumPeer.leader != null) {
            leaderId = i;
            break;
        }
    }
    assertFalse(leaderId == -1);
    // Joiner initial config consists of itself and the leader.
    sb = new StringBuilder();
    sb.append(serverConfig[leaderId]).append("\n").append(serverConfig[joinerId]).append("\n");
    /**
     * This server will delay the response to a NEWLEADER message, and run
     * the reconfig command so that the messages are processed in the order
     * below:
     *
     * <pre>
     * NEWLEADER
     * reconfig's PROPOSAL
     * reconfig's COMMITANDACTIVATE
     * UPTODATE
     * </pre>
     */
    mt[joinerId] = new MainThread(joinerId, clientPorts[joinerId], sb.toString(), false) {

        @Override
        public TestQPMain getTestQPMain() {
            return new MockTestQPMain();
        }
    };
    mt[joinerId].start();
    CustomQuorumPeer qp = getCustomQuorumPeer(mt[joinerId]);
    // delete any already existing .next file so its absence after the
    // reconfig proves it was cleaned up by processReconfig
    String nextDynamicConfigFilename = qp.getNextDynamicConfigFilename();
    File nextDynaFile = new File(nextDynamicConfigFilename);
    nextDynaFile.delete();
    // spin until the joiner has received Leader.NEWLEADER, then issue the
    // reconfig so it races the snapshot/UPTODATE exchange
    while (true) {
        if (qp.isNewLeaderMessage()) {
            preReconfigClient.reconfigure(serverConfig[joinerId], null, null, -1, null, null);
            break;
        } else {
            // sleep for 10 milliseconds and then check again
            Thread.sleep(10);
        }
    }
    watch = new CountdownWatcher();
    ZooKeeper postReconfigClient = new ZooKeeper("127.0.0.1:" + clientPorts[joinerId], ClientBase.CONNECTION_TIMEOUT, watch);
    watch.waitForConnected(ClientBase.CONNECTION_TIMEOUT);
    // do one successful operation on the newly added node
    postReconfigClient.create("/reconfigIssue", "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    assertFalse("zoo.cfg.dynamic.next is not deleted.", nextDynaFile.exists());
    // verify that joiner has up-to-date config, including all four servers.
    for (long j = 0; j <= SERVER_COUNT; j++) {
        assertNotNull("server " + j + " is not present in the new quorum", qp.getQuorumVerifier().getVotingMembers().get(j));
    }
    // close clients
    preReconfigClient.close();
    postReconfigClient.close();
}
Usage of org.apache.zookeeper.admin.ZooKeeperAdmin in the Apache ZooKeeper project: class ReconfigFailureCasesTest, method testTooFewRemainingPariticipants.
/*
 * Test that a reconfiguration fails if the proposed change would leave the
 * cluster with less than 2 participants (StandaloneEnabled = true).
 * StandaloneDisabledTest.java (startSingleServerTest) checks that if
 * StandaloneEnabled = false its legal to remove all but one remaining
 * server.
 *
 * NOTE(review): the method name misspells "Participants"; kept as-is so
 * external references to the test keep working.
 */
@Test
public void testTooFewRemainingPariticipants() throws Exception {
    // create 3 servers
    qu = new QuorumUtil(1);
    qu.disableJMXTest = true;
    qu.startAll();
    ZooKeeper[] zkArr = ReconfigTest.createHandles(qu);
    ZooKeeperAdmin[] zkAdminArr = ReconfigTest.createAdminHandles(qu);
    // removing 2 and 3 would leave only server 1 — below the 2-participant
    // minimum enforced while standaloneEnabled=true
    List<String> leavingServers = new ArrayList<String>();
    leavingServers.add("2");
    leavingServers.add("3");
    try {
        zkAdminArr[1].reconfigure(null, leavingServers, null, -1, null);
        // fixed message: the old text ("config version is not 8") was
        // copy-pasted from an unrelated version-check test
        Assert.fail("Reconfig should have failed since the new config would leave fewer than 2 participants");
    } catch (KeeperException.BadArgumentsException e) {
        // We expect this to happen.
    } catch (Exception e) {
        Assert.fail("Should have been BadArgumentsException!");
    }
    ReconfigTest.closeAllHandles(zkArr, zkAdminArr);
}
Usage of org.apache.zookeeper.admin.ZooKeeperAdmin in the Apache ZooKeeper project: class ReconfigFailureCasesTest, method testLeaderTimesoutOnNewQuorum.
/*
 * Tests that if a quorum of a new config is synced with the leader and a
 * reconfig is allowed to start, but the new quorum is then lost, the leader
 * times out and the remaining servers fall back to leader election.
 */
@Test
public void testLeaderTimesoutOnNewQuorum() throws Exception {
    // bring up a 3-server ensemble
    qu = new QuorumUtil(1);
    qu.disableJMXTest = true;
    qu.startAll();
    ZooKeeper[] clients = ReconfigTest.createHandles(qu);
    ZooKeeperAdmin[] adminClients = ReconfigTest.createAdminHandles(qu);
    List<String> toRemove = new ArrayList<String>();
    toRemove.add("3");
    qu.shutdown(2);
    try {
        // Server 2 was just shut down, so the leader still counts it as
        // "synced", which lets the reconfig begin (PrepRequestProcessor
        // requires a quorum of the new config to be synced beforehand).
        // Removing server 3 needs ACKs from a quorum of both {1,2,3}
        // (available) and {1,2} — but with 2 down the new-config quorum
        // can never acknowledge.
        adminClients[1].reconfigure(null, toRemove, null, -1, null);
        Assert.fail("Reconfig should have failed since we don't have quorum of new config");
    } catch (KeeperException.ConnectionLossException e) {
        // We expect leader to lose quorum of proposed config and time out
    } catch (Exception e) {
        Assert.fail("Should have been ConnectionLossException!");
    }
    // After the timeout the surviving servers sit in LOOKING: electing a
    // new leader would require finishing the reconfig, which is impossible
    // while server 2 stays down.
    Assert.assertEquals(QuorumStats.Provider.LOOKING_STATE, qu.getPeer(1).peer.getServerState());
    Assert.assertEquals(QuorumStats.Provider.LOOKING_STATE, qu.getPeer(3).peer.getServerState());
    ReconfigTest.closeAllHandles(clients, adminClients);
}
Aggregations