Use of org.apache.accumulo.server.zookeeper.ZooReaderWriter in project accumulo by apache.
The class ChangeSecret, method rewriteZooKeeperInstance:
private static void rewriteZooKeeperInstance(final Instance inst, final String newInstanceId, String oldPass, String newPass) throws Exception {
  final ZooReaderWriter orig = new ZooReaderWriter(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut(), oldPass);
  final IZooReaderWriter new_ = new ZooReaderWriter(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut(), newPass);
  String root = ZooUtil.getRoot(inst);
  recurse(orig, root, new Visitor() {
    @Override
    public void visit(ZooReader zoo, String path) throws Exception {
      String newPath = path.replace(inst.getInstanceID(), newInstanceId);
      byte[] data = zoo.getData(path, null);
      List<ACL> acls = orig.getZooKeeper().getACL(path, new Stat());
      if (acls.containsAll(Ids.READ_ACL_UNSAFE)) {
        new_.putPersistentData(newPath, data, NodeExistsPolicy.FAIL);
      } else {
        // upgrade
        if (acls.containsAll(Ids.OPEN_ACL_UNSAFE)) {
          // make user nodes private, they contain the user's password
          String[] parts = path.split("/");
          if (parts[parts.length - 2].equals("users")) {
            new_.putPrivatePersistentData(newPath, data, NodeExistsPolicy.FAIL);
          } else {
            // everything else can have the readable acl
            new_.putPersistentData(newPath, data, NodeExistsPolicy.FAIL);
          }
        } else {
          new_.putPrivatePersistentData(newPath, data, NodeExistsPolicy.FAIL);
        }
      }
    }
  });
  String path = "/accumulo/instances/" + inst.getInstanceName();
  orig.recursiveDelete(path, NodeMissingPolicy.SKIP);
  new_.putPersistentData(path, newInstanceId.getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
}
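The recurse helper and the Visitor callback used above are private to ChangeSecret and are not shown in this snippet. The following is only a reconstructed sketch of the depth-first traversal the call site implies: the interface shape is taken from the visit signature above, while the helper body and the ZooReader package location are assumptions.

import org.apache.accumulo.fate.zookeeper.ZooReader; // package location assumed

// Hypothetical reconstruction; the real helper in ChangeSecret may differ.
interface Visitor {
  void visit(ZooReader zoo, String path) throws Exception;
}

class ZooTraversalSketch {
  // Depth-first walk: visit the node itself, then descend into each child.
  static void recurse(ZooReader zoo, String root, Visitor v) throws Exception {
    v.visit(zoo, root);
    for (String child : zoo.getChildren(root)) {
      recurse(zoo, root + "/" + child, v);
    }
  }
}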
Use of org.apache.accumulo.server.zookeeper.ZooReaderWriter in project accumulo by apache.
The class ChangeSecret, method deleteInstance:
private static void deleteInstance(Instance origInstance, String oldPass) throws Exception {
  IZooReaderWriter orig = new ZooReaderWriter(origInstance.getZooKeepers(), origInstance.getZooKeepersSessionTimeOut(), oldPass);
  orig.recursiveDelete("/accumulo/" + origInstance.getInstanceID(), NodeMissingPolicy.SKIP);
}
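Because the delete is issued with NodeMissingPolicy.SKIP, it does nothing when the instance's ZooKeeper subtree is already gone, so the cleanup can be repeated safely.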
Use of org.apache.accumulo.server.zookeeper.ZooReaderWriter in project accumulo by apache.
The class ZooKeeperInitializationTest, method parentNodesAreCreatedWhenMissing:
@Test
public void parentNodesAreCreatedWhenMissing() throws Exception {
  ZooReaderWriter zReaderWriter = createMock(ZooReaderWriter.class);
  String zRoot = "/accumulo";
  expect(zReaderWriter.exists(zRoot + ReplicationConstants.ZOO_TSERVERS, null)).andReturn(false).once();
  zReaderWriter.mkdirs(zRoot + ReplicationConstants.ZOO_TSERVERS);
  expectLastCall().once();
  expect(zReaderWriter.exists(zRoot + ReplicationConstants.ZOO_WORK_QUEUE, null)).andReturn(false).once();
  zReaderWriter.mkdirs(zRoot + ReplicationConstants.ZOO_WORK_QUEUE);
  expectLastCall().once();
  replay(zReaderWriter);
  ZooKeeperInitialization.ensureZooKeeperInitialized(zReaderWriter, zRoot);
  verify(zReaderWriter);
}
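The mock expectations in this test outline what ZooKeeperInitialization.ensureZooKeeperInitialized has to do: check each replication parent node and create it when it is absent. Below is a minimal sketch of that check-then-create pattern; the method body is reconstructed from the expectations rather than copied from the production class, and the import locations are assumptions.

import org.apache.accumulo.core.replication.ReplicationConstants; // location assumed
import org.apache.accumulo.server.zookeeper.ZooReaderWriter;

class ZooKeeperInitializationSketch {
  // Create the replication parent nodes under zRoot if they do not exist yet.
  static void ensureZooKeeperInitialized(ZooReaderWriter zk, String zRoot) throws Exception {
    if (!zk.exists(zRoot + ReplicationConstants.ZOO_TSERVERS, null)) {
      zk.mkdirs(zRoot + ReplicationConstants.ZOO_TSERVERS);
    }
    if (!zk.exists(zRoot + ReplicationConstants.ZOO_WORK_QUEUE, null)) {
      zk.mkdirs(zRoot + ReplicationConstants.ZOO_WORK_QUEUE);
    }
  }
}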
Use of org.apache.accumulo.server.zookeeper.ZooReaderWriter in project accumulo by apache.
The class GarbageCollectorIT, method killMacGc:
private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException {
  // kill gc started by MAC
  getCluster().killProcess(ServerType.GARBAGE_COLLECTOR, getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next());
  // delete lock in zookeeper if there, this will allow next GC to start quickly
  String path = ZooUtil.getRoot(new ZooKeeperInstance(getCluster().getClientConfig())) + Constants.ZGC_LOCK;
  ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
  try {
    ZooLock.deleteLock(zk, path);
  } catch (IllegalStateException e) {
    // no GC lock was present; nothing to clean up
  }
  assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR));
}
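Deleting the GC lock node by hand matters here because the killed process's ZooKeeper session would otherwise have to time out before its ephemeral lock disappeared; removing the lock immediately lets the next garbage collector started by the test acquire it without waiting.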
Use of org.apache.accumulo.server.zookeeper.ZooReaderWriter in project accumulo by apache.
The class GarbageCollectorIT, method testProperPortAdvertisement:
@Test
public void testProperPortAdvertisement() throws Exception {
  Connector conn = getConnector();
  Instance instance = conn.getInstance();
  ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
  String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
  for (int i = 0; i < 5; i++) {
    List<String> locks;
    try {
      locks = zk.getChildren(path, null);
    } catch (NoNodeException e) {
      Thread.sleep(5000);
      continue;
    }
    if (locks != null && locks.size() > 0) {
      Collections.sort(locks);
      String lockPath = path + "/" + locks.get(0);
      String gcLoc = new String(zk.getData(lockPath, null));
      Assert.assertTrue("Found unexpected data in zookeeper for GC location: " + gcLoc, gcLoc.startsWith(Service.GC_CLIENT.name()));
      int loc = gcLoc.indexOf(ServerServices.SEPARATOR_CHAR);
      Assert.assertNotEquals("Could not find split point of GC location for: " + gcLoc, -1, loc);
      String addr = gcLoc.substring(loc + 1);
      int addrSplit = addr.indexOf(':');
      Assert.assertNotEquals("Could not find split of GC host:port for: " + addr, -1, addrSplit);
      String host = addr.substring(0, addrSplit), port = addr.substring(addrSplit + 1);
      // We shouldn't have the "bindall" address in zk
      Assert.assertNotEquals("0.0.0.0", host);
      // Nor should we have the "random port" in zk
      Assert.assertNotEquals(0, Integer.parseInt(port));
      return;
    }
    Thread.sleep(5000);
  }
  Assert.fail("Could not find advertised GC address");
}
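Sorting the lock children and reading the first entry selects the lowest sequence number, which is the node currently holding the GC lock; its data is the ServerServices string the garbage collector advertised (the service name, the separator, then host:port). That is what lets the test split out the host and port and assert that neither the wildcard bind address 0.0.0.0 nor an unresolved port 0 leaked into ZooKeeper.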