Example usage of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in the Apache Accumulo project: class MasterMetadataUtil, method updateRootTabletDataFile.
/**
 * Update the data file for the root tablet.
 *
 * <p>In this ZooKeeper-only branch, each entry in {@code unusedWalLogs} is resolved to a znode
 * under the root-tablet log location and deleted, retrying forever until the delete succeeds.
 * The remaining parameters are not referenced in the visible body; presumably they mirror the
 * non-root tablet update path — TODO confirm against the full class.
 */
private static void updateRootTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
  IZooReaderWriter writer = ZooReaderWriter.getInstance();
  String logRoot = MetadataTableUtil.getZookeeperLogLocation();
  for (String walEntry : unusedWalLogs) {
    // only the last path component names the znode under the log location
    String[] pathParts = walEntry.split("/");
    String nodePath = logRoot + "/" + pathParts[pathParts.length - 1];
    boolean removed = false;
    while (!removed) {
      try {
        if (writer.exists(nodePath)) {
          log.debug("Removing WAL reference for root table {}", nodePath);
          writer.recursiveDelete(nodePath, NodeMissingPolicy.SKIP);
        }
        removed = true;
      } catch (KeeperException | InterruptedException e) {
        // log and retry after a pause; the delete must eventually happen
        log.error("{}", e.getMessage(), e);
        sleepUninterruptibly(1, TimeUnit.SECONDS);
      }
    }
  }
}
Example usage of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in the Apache Accumulo project: class MetadataTableUtil, method getRootTabletDir.
/**
 * Reads the root tablet's directory location from the ZROOT_TABLET_PATH znode.
 *
 * @return the znode's data decoded as a UTF-8 string
 * @throws IOException if the ZooKeeper read fails or the thread is interrupted
 */
public static String getRootTabletDir() throws IOException {
  String tabletPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_PATH;
  IZooReaderWriter reader = ZooReaderWriter.getInstance();
  try {
    byte[] data = reader.getData(tabletPath, null);
    return new String(data, UTF_8);
  } catch (KeeperException e) {
    throw new IOException(e);
  } catch (InterruptedException e) {
    // restore interrupt status before translating to a checked IOException
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
Example usage of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in the Apache Accumulo project: class MetadataTableUtil, method getRootLogEntries.
/**
 * Collects the root tablet's write-ahead-log entries from ZooKeeper into {@code result},
 * clearing the list first. Entries are rewritten to reference the root tablet extent
 * (upgrade from the legacy {@code !0;!0<<} extent to {@code +r<<}).
 *
 * <p>Fix: the original wrapped this in {@code while (true) { ... break; }} with a bare
 * {@code continue} in the catch — the loop executed exactly once and the continue was the
 * last statement of its loop body, so both were dead scaffolding and have been removed.
 */
static void getRootLogEntries(final ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();
  String root = getZookeeperLogLocation();
  result.clear();
  // A log node can be removed between listing the children and reading each child's
  // data, so a missing node is simply skipped.
  for (String child : zoo.getChildren(root)) {
    try {
      LogEntry e = LogEntry.fromBytes(zoo.getData(root + "/" + child, null));
      // upgrade from !0;!0<< -> +r<<
      e = new LogEntry(RootTable.EXTENT, 0, e.server, e.filename);
      result.add(e);
    } catch (KeeperException.NoNodeException ex) {
      // node disappeared after getChildren(); ignore it
    }
  }
}
Example usage of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in the Apache Accumulo project: class CleanZookeeper, method main.
/**
 * Removes ZooKeeper state belonging to Accumulo instances other than the one this process
 * is configured for: stale instance-name entries and stale instance-id subtrees.
 *
 * @param args
 *          must contain one element: the address of a zookeeper node; a second parameter provides an additional authentication value
 * @throws IOException
 *           error connecting to accumulo or zookeeper
 */
public static void main(String[] args) throws IOException {
  Opts opts = new Opts();
  opts.parseArgs(CleanZookeeper.class.getName(), args);
  String root = Constants.ZROOT;
  IZooReaderWriter zk = ZooReaderWriter.getInstance();
  if (opts.auth != null) {
    // authenticate with the digest scheme so ACL-protected nodes can be deleted
    zk.getZooKeeper().addAuthInfo("digest", ("accumulo:" + opts.auth).getBytes(UTF_8));
  }
  try {
    for (String child : zk.getChildren(root)) {
      if (Constants.ZINSTANCES.equals("/" + child)) {
        // under /instances, remove name entries that map to a different instance id
        for (String instanceName : zk.getChildren(root + Constants.ZINSTANCES)) {
          String instanceNamePath = root + Constants.ZINSTANCES + "/" + instanceName;
          byte[] id = zk.getData(instanceNamePath, null);
          if (id != null && !new String(id, UTF_8).equals(HdfsZooInstance.getInstance().getInstanceID())) {
            try {
              zk.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
            } catch (KeeperException.NoAuthException ex) {
              log.warn("Unable to delete {}", instanceNamePath);
            }
          }
        }
      } else if (!child.equals(HdfsZooInstance.getInstance().getInstanceID())) {
        // any other top-level child that is not this instance's id is stale
        String path = root + "/" + child;
        try {
          zk.recursiveDelete(path, NodeMissingPolicy.SKIP);
        } catch (KeeperException.NoAuthException ex) {
          log.warn("Unable to delete {}", path);
        }
      }
    }
  } catch (Exception ex) {
    // Fix: was System.out.println("Error Occurred: " + ex), which discarded the stack
    // trace and bypassed the logger used everywhere else in this method.
    log.error("{}", ex.getMessage(), ex);
  }
}
Example usage of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in the Apache Accumulo project: class ZKAuthenticator, method constructUser.
/**
 * Sets up the user in ZK for the provided user. No checking for existence is done here, it
 * should be done before calling. The cache is cleared and the write performed while holding
 * the {@code zooCache} lock so readers do not observe stale state.
 */
private void constructUser(String user, byte[] pass) throws KeeperException, InterruptedException {
  synchronized (zooCache) {
    zooCache.clear();
    String userPath = ZKUserPath + "/" + user;
    ZooReaderWriter.getInstance().putPrivatePersistentData(userPath, pass, NodeExistsPolicy.FAIL);
  }
}
Aggregations