Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.
The class ZooZap, method main:
public static void main(String[] args) {
  Opts opts = new Opts();
  opts.parseArgs(ZooZap.class.getName(), args);

  if (!opts.zapMaster && !opts.zapTservers && !opts.zapTracers) {
    new JCommander(opts).usage();
    return;
  }

  AccumuloConfiguration siteConf = SiteConfiguration.getInstance();
  // Login as the server on secure HDFS
  if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
    SecurityUtil.serverLogin(siteConf);
  }

  String iid = opts.getInstance().getInstanceID();
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();

  if (opts.zapMaster) {
    String masterLockPath = Constants.ZROOT + "/" + iid + Constants.ZMASTER_LOCK;
    try {
      zapDirectory(zoo, masterLockPath, opts);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  if (opts.zapTservers) {
    String tserversPath = Constants.ZROOT + "/" + iid + Constants.ZTSERVERS;
    try {
      List<String> children = zoo.getChildren(tserversPath);
      for (String child : children) {
        message("Deleting " + tserversPath + "/" + child + " from zookeeper", opts);
        if (opts.zapMaster) {
          // the master lock was already zapped, so the whole tserver entry can go
          ZooReaderWriter.getInstance().recursiveDelete(tserversPath + "/" + child, NodeMissingPolicy.SKIP);
        } else {
          // master is still live: only release the tserver's lock, and only if one is held
          String path = tserversPath + "/" + child;
          if (zoo.getChildren(path).size() > 0) {
            if (!ZooLock.deleteLock(path, "tserver")) {
              message("Did not delete " + tserversPath + "/" + child, opts);
            }
          }
        }
      }
    } catch (Exception e) {
      log.error("{}", e.getMessage(), e);
    }
  }

  if (opts.zapTracers) {
    String path = opts.getTraceZKPath();
    try {
      zapDirectory(zoo, path, opts);
    } catch (Exception e) {
      // do nothing if the /tracers node does not exist.
    }
  }
}
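The zapDirectory helper called above for the master lock and tracer nodes is not included in this snippet. A minimal sketch of what it plausibly does, built only from the IZooReaderWriter calls already used on this page (getChildren, recursiveDelete); a reconstruction, not the verbatim Accumulo source:

  // Hypothetical reconstruction of zapDirectory: removes every child of the
  // given ZooKeeper directory, skipping nodes that vanish concurrently.
  private static void zapDirectory(IZooReaderWriter zoo, String path, Opts opts)
      throws KeeperException, InterruptedException {
    List<String> children = zoo.getChildren(path);
    for (String child : children) {
      message("Deleting " + path + "/" + child + " from zookeeper", opts);
      // NodeMissingPolicy.SKIP: a node deleted out from under us is not an error
      zoo.recursiveDelete(path + "/" + child, NodeMissingPolicy.SKIP);
    }
  }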
Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.
The class ExistingMacIT, method testExistingInstance:
@Test
public void testExistingInstance() throws Exception {
  Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));

  conn.tableOperations().create("table1");

  BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
  Mutation m1 = new Mutation("00081");
  m1.put("math", "sqroot", "9");
  m1.put("math", "sq", "6560");
  bw.addMutation(m1);
  bw.close();

  conn.tableOperations().flush("table1", null, null, true);
  // TODO use constants
  conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
  conn.tableOperations().flush(RootTable.NAME, null, null, true);

  Set<Entry<ServerType, Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
  for (Entry<ServerType, Collection<ProcessReference>> entry : procs) {
    if (entry.getKey() == ServerType.ZOOKEEPER)
      continue;
    for (ProcessReference pr : entry.getValue()) {
      getCluster().killProcess(entry.getKey(), pr);
    }
  }

  final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
  final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
  IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
  final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID();
  while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
    log.debug("Accumulo services still have their ZK locks held");
    Thread.sleep(1000);
  }

  File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
  FileUtils.deleteQuietly(hadoopConfDir);
  assertTrue(hadoopConfDir.mkdirs());
  createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
  createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));

  File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
  FileUtils.deleteQuietly(testDir2);

  MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
  macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);

  MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
  accumulo2.start();

  conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
  try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
    int sum = 0;
    for (Entry<Key, Value> entry : scanner) {
      sum += Integer.parseInt(entry.getValue().toString());
    }
    Assert.assertEquals(6569, sum);
  }

  accumulo2.stop();
}
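createEmptyConfig is invoked above but not shown. A minimal sketch, assuming an empty Hadoop configuration file is all the second cluster needs; the use of org.apache.hadoop.conf.Configuration here is an assumption, not the verbatim test helper:

  // Hypothetical helper: writes a configuration file containing no properties,
  // so the second MiniAccumuloCluster inherits no stray Hadoop settings.
  private void createEmptyConfig(File confFile) throws IOException {
    Configuration conf = new Configuration(false); // org.apache.hadoop.conf.Configuration, no defaults
    try (OutputStream out = new FileOutputStream(confFile)) {
      conf.writeXml(out); // emits an empty <configuration/> document
    }
  }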
Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.
The class ZooStore, method remove:
@Override
public void remove(String path) throws DistributedStoreException {
  try {
    log.debug("Removing {}", path);
    path = relative(path);
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    if (zoo.exists(path))
      zoo.recursiveDelete(path, NodeMissingPolicy.SKIP);
    cache.clear();
  } catch (Exception ex) {
    throw new DistributedStoreException(ex);
  }
}
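Both relative and cache are fields of the surrounding ZooStore and are not shown here. A sketch of the likely shape of relative, under the assumption that the store anchors all caller-supplied paths beneath a fixed ZooKeeper base path (the basePath field name is hypothetical):

  // Hypothetical: resolves a store-relative path against the store's base
  // ZooKeeper path, e.g. "/root_tablet" -> "<basePath>/root_tablet".
  private String relative(String path) {
    return basePath + path;
  }

Clearing the cache after the delete keeps later reads from serving the removed node out of stale cached data.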
Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.
The class KerberosAuthenticator, method createUserNodeInZk:
private void createUserNodeInZk(String principal) throws KeeperException, InterruptedException {
  synchronized (zooCache) {
    zooCache.clear();
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    zoo.putPrivatePersistentData(zkUserPath + "/" + principal, new byte[0], NodeExistsPolicy.FAIL);
  }
}
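A hedged caller-side sketch (not from the Accumulo source): because the node is written with NodeExistsPolicy.FAIL, a pre-existing user node surfaces as a KeeperException with code NODEEXISTS, which a caller may choose to treat as "user already exists" rather than as a failure:

  // Hypothetical wrapper: tolerate an already-created user node.
  void ensureUserNode(String principal) throws KeeperException, InterruptedException {
    try {
      createUserNodeInZk(principal);
    } catch (KeeperException e) {
      if (e.code() != KeeperException.Code.NODEEXISTS)
        throw e;
      // node already present: nothing to do
    }
  }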
Use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.
The class DeadServerList, method getList:
public List<DeadServer> getList() {
  List<DeadServer> result = new ArrayList<>();
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();
  try {
    List<String> children = zoo.getChildren(path);
    if (children != null) {
      for (String child : children) {
        Stat stat = new Stat();
        byte[] data;
        try {
          data = zoo.getData(path + "/" + child, stat);
        } catch (NoNodeException nne) {
          // the node was removed between getChildren() and getData(),
          // so it no longer belongs in the dead server list.
          continue;
        }
        DeadServer server = new DeadServer(child, stat.getMtime(), new String(data, UTF_8));
        result.add(server);
      }
    }
  } catch (Exception ex) {
    log.error("{}", ex.getMessage(), ex);
  }
  return result;
}
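A short usage sketch (hypothetical, not from the Accumulo source); it assumes the conventional dead-tserver path constant and the server/lastStatus/status fields suggested by the DeadServer constructor call above:

  // Hypothetical usage: list dead tservers with their last-seen time.
  DeadServerList dead = new DeadServerList(Constants.ZROOT + "/" + instanceId + Constants.ZDEADTSERVERS);
  for (DeadServer server : dead.getList()) {
    System.out.println(server.server + " died around " + new Date(server.lastStatus) + ": " + server.status);
  }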