use of org.apache.zookeeper.ZooKeeper in project zookeeper by apache.
the class ZxidRolloverTest method testRolloverThenLeaderRestart.
/**
 * Similar to testRolloverThenRestart, but ensure leadership can change,
 * comes back, has the right data, and is able to serve new requests.
 */
@Test
public void testRolloverThenLeaderRestart() throws Exception {
    ZooKeeper zk = getClient(idxLeader);
    // Seed znodes, then push the zxid epoch counter close to its rollover point.
    int countCreated = createNodes(zk, 0, 10);
    adjustEpochNearEnd();
    checkNodes(zk, 0, countCreated);
    // Restart the server at idxLeader across the epoch boundary and verify all
    // previously created data is still visible through it.
    shutdown(idxLeader);
    start(idxLeader);
    zk = getClient(idxLeader);
    checkNodes(zk, 0, countCreated);
    countCreated += createNodes(zk, countCreated, 10);
    // Push near rollover a second time and add more nodes before restarting again.
    adjustEpochNearEnd();
    checkNodes(zk, 0, countCreated);
    countCreated += createNodes(zk, countCreated, 10);
    shutdown(idxLeader);
    start(idxLeader);
    zk = getClient(idxLeader);
    checkNodes(zk, 0, countCreated);
    countCreated += createNodes(zk, countCreated, 10);
    // Final restart: this time read through a follower, confirming the whole
    // ensemble (not just the restarted node) agrees on the data and can serve
    // new writes after the leadership change.
    // NOTE(review): after shutdown/start the node at idxLeader is presumably no
    // longer guaranteed to be leader — confirm against the test harness.
    shutdown(idxLeader);
    start(idxLeader);
    zk = getClient(idxFollower);
    checkNodes(zk, 0, countCreated);
    countCreated += createNodes(zk, countCreated, 10);
    // sanity check: 4 batches of 10 creates were attempted, so the total must
    // be positive and strictly below 50
    Assert.assertTrue(countCreated > 0);
    Assert.assertTrue(countCreated < 50);
}
use of org.apache.zookeeper.ZooKeeper in project zookeeper by apache.
the class PurgeTxnTest method testPurgeWhenLogRollingInProgress.
/**
 * Tests purge when logs are rolling or a new snapshot is created, then
 * these newer files should also be excluded in the current cycle.
 *
 * For frequent snapshotting, configured SnapCount to 30. There are three
 * threads which will create 1000 znodes each and simultaneously do purge
 * call
 */
@Test
public void testPurgeWhenLogRollingInProgress() throws Exception {
    tmpDir = ClientBase.createTmpDir();
    ClientBase.setupTestEnv();
    ZooKeeperServer zks = new ZooKeeperServer(tmpDir, tmpDir, 3000);
    // Low snap count forces frequent log rolls/snapshots so the purge below
    // races with rolling, which is exactly the scenario under test.
    SyncRequestProcessor.setSnapCount(30);
    final int PORT = Integer.parseInt(HOSTPORT.split(":")[1]);
    ServerCnxnFactory f = ServerCnxnFactory.createFactory(PORT, -1);
    f.startup(zks);
    Assert.assertTrue("waiting for server being up ", ClientBase.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT));
    final ZooKeeper zk = ClientBase.createZKClient(HOSTPORT);
    final CountDownLatch doPurge = new CountDownLatch(1);
    final CountDownLatch purgeFinished = new CountDownLatch(1);
    final AtomicBoolean opFailed = new AtomicBoolean(false);
    new Thread() {
        @Override
        public void run() {
            try {
                // Wait (bounded) for the client-op threads to signal that
                // purging may begin, then purge keeping 3 snapshots.
                doPurge.await(OP_TIMEOUT_IN_MILLIS / 2, TimeUnit.MILLISECONDS);
                PurgeTxnLog.purge(tmpDir, tmpDir, 3);
            } catch (IOException | InterruptedException e) {
                LOG.error("Exception when purge", e);
                opFailed.set(true);
            } finally {
                // Always release the waiter, even on failure, so the test
                // cannot hang on purgeFinished.await below.
                purgeFinished.countDown();
            }
        }
    }.start();
    final int thCount = 3;
    List<String> znodes = manyClientOps(zk, doPurge, thCount, "/invalidsnap");
    Assert.assertTrue("Purging is not finished!", purgeFinished.await(OP_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS));
    Assert.assertFalse("Purging failed!", opFailed.get());
    // Every znode created by the worker threads must still be readable; the
    // purge must not have removed the files backing them.
    for (String znode : znodes) {
        try {
            zk.getData(znode, false, null);
        } catch (Exception ke) {
            LOG.error("Unexpected exception when visiting znode!", ke);
            Assert.fail("Unexpected exception when visiting znode!");
        }
    }
    zk.close();
    f.shutdown();
    zks.shutdown();
    zks.getTxnLogFactory().close();
}
use of org.apache.zookeeper.ZooKeeper in project zookeeper by apache.
the class PurgeTxnTest method testPurgeDoesNotDeleteOverlappingLogFile.
/**
 * Verifies that purge does not delete any log files which started before the oldest retained
 * snapshot but which might extend beyond it.
 * @throws Exception an exception might be thrown here
 */
@Test
public void testPurgeDoesNotDeleteOverlappingLogFile() throws Exception {
    // Setting used for snapRetainCount in this test.
    final int SNAP_RETAIN_COUNT = 3;
    // Number of znodes this test creates in each snapshot.
    final int NUM_ZNODES_PER_SNAPSHOT = 100;
    /**
     * Set a sufficiently high snapCount to ensure that we don't rollover the log. Normally,
     * the default value (100K at time of this writing) would ensure this, but we make that
     * dependence explicit here to make the test future-proof. Not rolling over the log is
     * important for this test since we are testing retention of the one and only log file which
     * predates each retained snapshot.
     */
    SyncRequestProcessor.setSnapCount(SNAP_RETAIN_COUNT * NUM_ZNODES_PER_SNAPSHOT * 10);
    // Create Zookeeper and connect to it.
    tmpDir = ClientBase.createTmpDir();
    ClientBase.setupTestEnv();
    ZooKeeperServer zks = new ZooKeeperServer(tmpDir, tmpDir, 3000);
    final int PORT = Integer.parseInt(HOSTPORT.split(":")[1]);
    ServerCnxnFactory f = ServerCnxnFactory.createFactory(PORT, -1);
    f.startup(zks);
    Assert.assertTrue("waiting for server being up ", ClientBase.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT));
    ZooKeeper zk = ClientBase.createZKClient(HOSTPORT);
    // Unique identifier for each znode that we create.
    int unique = 0;
    try {
        /**
         * Create some znodes and take a snapshot. Repeat this until we have SNAP_RETAIN_COUNT
         * snapshots. Do not rollover the log.
         */
        for (int snapshotCount = 0; snapshotCount < SNAP_RETAIN_COUNT; snapshotCount++) {
            // Use the named constant rather than a hard-coded 100 so the loop
            // stays in sync with the snapCount computation above.
            for (int i = 0; i < NUM_ZNODES_PER_SNAPSHOT; i++, unique++) {
                zk.create("/snap-" + unique, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
            zks.takeSnapshot();
        }
        // Create some additional znodes without taking a snapshot afterwards.
        for (int i = 0; i < NUM_ZNODES_PER_SNAPSHOT; i++, unique++) {
            zk.create("/snap-" + unique, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
    } finally {
        zk.close();
    }
    // Shutdown Zookeeper.
    f.shutdown();
    zks.getTxnLogFactory().close();
    zks.shutdown();
    Assert.assertTrue("waiting for server to shutdown", ClientBase.waitForServerDown(HOSTPORT, CONNECTION_TIMEOUT));
    // Purge snapshot and log files.
    PurgeTxnLog.purge(tmpDir, tmpDir, SNAP_RETAIN_COUNT);
    // Initialize Zookeeper again from the same dataDir.
    zks = new ZooKeeperServer(tmpDir, tmpDir, 3000);
    f = ServerCnxnFactory.createFactory(PORT, -1);
    f.startup(zks);
    zk = ClientBase.createZKClient(HOSTPORT);
    /**
     * Verify that the last znode that was created above exists. This znode's creation was
     * captured by the transaction log which was created before any of the above
     * SNAP_RETAIN_COUNT snapshots were created, but it's not captured in any of these
     * snapshots. So for it to exist, the (only) existing log file should not have been purged.
     */
    final String lastZnode = "/snap-" + (unique - 1);
    final Stat stat = zk.exists(lastZnode, false);
    Assert.assertNotNull("Last znode does not exist: " + lastZnode, stat);
    // Shutdown for the last time.
    f.shutdown();
    zks.getTxnLogFactory().close();
    zks.shutdown();
}
use of org.apache.zookeeper.ZooKeeper in project zookeeper by apache.
the class PurgeTxnTest method testPurge.
/**
 * test the purge
 * @throws Exception an exception might be thrown here
 */
@Test
public void testPurge() throws Exception {
    tmpDir = ClientBase.createTmpDir();
    ClientBase.setupTestEnv();
    ZooKeeperServer zks = new ZooKeeperServer(tmpDir, tmpDir, 3000);
    // Low snap count so that creating 2000 znodes below produces many
    // snapshots/logs for the purge to work on.
    SyncRequestProcessor.setSnapCount(100);
    final int PORT = Integer.parseInt(HOSTPORT.split(":")[1]);
    ServerCnxnFactory f = ServerCnxnFactory.createFactory(PORT, -1);
    f.startup(zks);
    Assert.assertTrue("waiting for server being up ", ClientBase.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT));
    ZooKeeper zk = ClientBase.createZKClient(HOSTPORT);
    try {
        for (int i = 0; i < 2000; i++) {
            zk.create("/invalidsnap-" + i, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
    } finally {
        // Always release the client session, even if a create fails.
        zk.close();
    }
    f.shutdown();
    zks.getTxnLogFactory().close();
    Assert.assertTrue("waiting for server to shutdown", ClientBase.waitForServerDown(HOSTPORT, CONNECTION_TIMEOUT));
    // Purge the data dirs, retaining only the 3 most recent snapshots.
    // (The previous comment said "corrupt the snapshot", which did not match
    // the call below — nothing is corrupted here.)
    PurgeTxnLog.purge(tmpDir, tmpDir, 3);
    FileTxnSnapLog snaplog = new FileTxnSnapLog(tmpDir, tmpDir);
    // Ask for up to 4 recent snapshots; after the purge only 3 should remain.
    List<File> listLogs = snaplog.findNRecentSnapshots(4);
    int numSnaps = 0;
    for (File ff : listLogs) {
        if (ff.getName().startsWith("snapshot")) {
            numSnaps++;
        }
    }
    Assert.assertTrue("exactly 3 snapshots ", (numSnaps == 3));
    snaplog.close();
    zks.shutdown();
}
use of org.apache.zookeeper.ZooKeeper in project zookeeper by apache.
the class SimpleClient method start.
/**
 * Starts this client: opens a ZooKeeper session against {@code hostPort},
 * registers a watch on {@code /simpleCase} via an async getData call, and
 * reports the connection attempt when a reporter is configured.
 */
public void start() {
    try {
        // 15s session timeout; this object acts as both Watcher and callback.
        zk = new ZooKeeper(hostPort, 15000, this);
        zk.getData("/simpleCase", true, this, null);
        if (r != null) {
            String message = "Client " + index + " connecting to " + hostPort;
            r.report(message);
        }
    } catch (Exception e) {
        // Sample-client behavior: surface the failure on stderr and continue.
        e.printStackTrace();
    }
}
Aggregations