Use of org.apache.flink.shaded.curator5.org.apache.curator.framework.recipes.cache.PathChildrenCache in project hadoop by apache.
The class ZKDelegationTokenSecretManager, method startThreads.
@Override
public void startThreads() throws IOException {
    if (!isExternalClient) {
        try {
            zkClient.start();
        } catch (Exception e) {
            throw new IOException("Could not start Curator Framework", e);
        }
    } else {
        // If namespace parents are implicitly created, they won't have ACLs.
        // So, let's explicitly create them.
        CuratorFramework nullNsFw = zkClient.usingNamespace(null);
        EnsurePath ensureNs = nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
        try {
            ensureNs.ensure(nullNsFw.getZookeeperClient());
        } catch (Exception e) {
            throw new IOException("Could not create namespace", e);
        }
    }
    listenerThreadPool = Executors.newSingleThreadExecutor();
    try {
        delTokSeqCounter = new SharedCount(zkClient, ZK_DTSM_SEQNUM_ROOT, 0);
        if (delTokSeqCounter != null) {
            delTokSeqCounter.start();
        }
    } catch (Exception e) {
        throw new IOException("Could not start Sequence Counter", e);
    }
    try {
        keyIdSeqCounter = new SharedCount(zkClient, ZK_DTSM_KEYID_ROOT, 0);
        if (keyIdSeqCounter != null) {
            keyIdSeqCounter.start();
        }
    } catch (Exception e) {
        throw new IOException("Could not start KeyId Counter", e);
    }
    try {
        createPersistentNode(ZK_DTSM_MASTER_KEY_ROOT);
        createPersistentNode(ZK_DTSM_TOKENS_ROOT);
    } catch (Exception e) {
        throw new RuntimeException("Could not create ZK paths");
    }
    try {
        keyCache = new PathChildrenCache(zkClient, ZK_DTSM_MASTER_KEY_ROOT, true);
        if (keyCache != null) {
            keyCache.start(StartMode.BUILD_INITIAL_CACHE);
            keyCache.getListenable().addListener(new PathChildrenCacheListener() {
                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    switch (event.getType()) {
                        case CHILD_ADDED:
                            processKeyAddOrUpdate(event.getData().getData());
                            break;
                        case CHILD_UPDATED:
                            processKeyAddOrUpdate(event.getData().getData());
                            break;
                        case CHILD_REMOVED:
                            processKeyRemoved(event.getData().getPath());
                            break;
                        default:
                            break;
                    }
                }
            }, listenerThreadPool);
            loadFromZKCache(false);
        }
    } catch (Exception e) {
        throw new IOException("Could not start PathChildrenCache for keys", e);
    }
    try {
        tokenCache = new PathChildrenCache(zkClient, ZK_DTSM_TOKENS_ROOT, true);
        if (tokenCache != null) {
            tokenCache.start(StartMode.BUILD_INITIAL_CACHE);
            tokenCache.getListenable().addListener(new PathChildrenCacheListener() {
                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    switch (event.getType()) {
                        case CHILD_ADDED:
                            processTokenAddOrUpdate(event.getData());
                            break;
                        case CHILD_UPDATED:
                            processTokenAddOrUpdate(event.getData());
                            break;
                        case CHILD_REMOVED:
                            processTokenRemoved(event.getData());
                            break;
                        default:
                            break;
                    }
                }
            }, listenerThreadPool);
            loadFromZKCache(true);
        }
    } catch (Exception e) {
        throw new IOException("Could not start PathChildrenCache for tokens", e);
    }
    super.startThreads();
}
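The core pattern above is: build a PathChildrenCache over each root, start it with BUILD_INITIAL_CACHE so the initial children are loaded before start() returns, and dispatch listener callbacks on a dedicated single-thread executor. A minimal self-contained sketch of that same pattern follows; the connect string, path, and class name are placeholders for illustration, not part of the Hadoop code:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class KeyCacheSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework zk = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3));
        zk.start();
        ExecutorService listenerPool = Executors.newSingleThreadExecutor();
        // cacheData=true keeps each child's payload in memory alongside its path
        PathChildrenCache keyCache = new PathChildrenCache(zk, "/keys", true);
        keyCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
                switch (event.getType()) {
                    case CHILD_ADDED:
                    case CHILD_UPDATED:
                        System.out.println("upsert: " + event.getData().getPath());
                        break;
                    case CHILD_REMOVED:
                        System.out.println("removed: " + event.getData().getPath());
                        break;
                    default:
                        break;
                }
            }
        }, listenerPool);
        // BUILD_INITIAL_CACHE populates the cache synchronously, so getCurrentData()
        // can be used right away for a warm load, mirroring loadFromZKCache() above.
        keyCache.start(StartMode.BUILD_INITIAL_CACHE);
        keyCache.getCurrentData().forEach(child -> System.out.println("initial: " + child.getPath()));
        // A real service would keep these open; closed here only to end the sketch cleanly.
        keyCache.close();
        listenerPool.shutdown();
        zk.close();
    }
}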
Use of org.apache.flink.shaded.curator5.org.apache.curator.framework.recipes.cache.PathChildrenCache in project druid by druid-io.
The class Announcer, method stop.
@LifecycleStop
public void stop() {
    synchronized (toAnnounce) {
        if (!started) {
            return;
        }
        started = false;
        Closer closer = Closer.create();
        for (PathChildrenCache cache : listeners.values()) {
            closer.register(cache);
        }
        try {
            CloseQuietly.close(closer);
        } finally {
            pathChildrenCacheExecutor.shutdown();
        }
        for (Map.Entry<String, ConcurrentMap<String, byte[]>> entry : announcements.entrySet()) {
            String basePath = entry.getKey();
            for (String announcementPath : entry.getValue().keySet()) {
                unannounce(ZKPaths.makePath(basePath, announcementPath));
            }
        }
        if (!parentsIBuilt.isEmpty()) {
            CuratorTransaction transaction = curator.inTransaction();
            for (String parent : parentsIBuilt) {
                try {
                    transaction = transaction.delete().forPath(parent).and();
                } catch (Exception e) {
                    log.info(e, "Unable to delete parent[%s], boooo.", parent);
                }
            }
            try {
                ((CuratorTransactionFinal) transaction).commit();
            } catch (Exception e) {
                log.info(e, "Unable to commit transaction. Please feed the hamsters");
            }
        }
    }
}
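The notable ordering here is that every PathChildrenCache is closed before the executor that services their listeners is shut down, since an open cache still holds ZooKeeper watches that may deliver events. Distilled into a standalone helper (the class and method names below are hypothetical, not Druid code), the same ordering looks like this:

import java.util.List;
import java.util.concurrent.ExecutorService;

import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.utils.CloseableUtils;

public final class CacheShutdown {
    private CacheShutdown() {}

    // Close every cache, then stop the shared listener executor.
    public static void closeAll(List<PathChildrenCache> caches, ExecutorService listenerExecutor) {
        try {
            for (PathChildrenCache cache : caches) {
                // closeQuietly swallows the IOException so one failing cache
                // does not prevent the remaining caches from being closed
                CloseableUtils.closeQuietly(cache);
            }
        } finally {
            listenerExecutor.shutdown();
        }
    }
}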
Use of org.apache.flink.shaded.curator5.org.apache.curator.framework.recipes.cache.PathChildrenCache in project BRFS by zhangnianli.
The class FileCenter, method main.
public static void main(String[] args) {
    id = new Random().nextInt(10);
    System.out.println("id = " + id);
    System.out.println("hahahahha");
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    CuratorFramework client = CuratorFrameworkFactory.builder()
            .namespace(ROOT)
            .connectString(zk_address)
            .retryPolicy(retryPolicy)
            .build();
    client.start();
    try {
        Stat stat = client.checkExists().forPath(DUPS);
        System.out.println("stat =" + stat);
        if (stat == null) {
            System.out.println("create--" + client.create().forPath(DUPS));
        }
        ExecutorService pool = Executors.newFixedThreadPool(5);
        PathChildrenCache pathCache = new PathChildrenCache(client, DUPS, true, false, pool);
        pathCache.getListenable().addListener(new PathNodeListener());
        pathCache.start();
        // TreeCache cache = new TreeCache(client, DUPS);
        // cache.getListenable().addListener(new TreeNodeListener(), pool);
        // cache.start();
    } catch (Exception e) {
        e.printStackTrace();
    }
    synchronized (client) {
        try {
            client.wait();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    client.close();
}
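The PathNodeListener registered above is not shown in this excerpt. A listener of that kind would presumably look something like the following sketch; the class body is an assumption for illustration, not BRFS code:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;

// Hypothetical stand-in for the PathNodeListener referenced in FileCenter.main.
public class PathNodeListener implements PathChildrenCacheListener {
    @Override
    public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
        ChildData data = event.getData();
        // Connection-state events (CONNECTION_LOST, etc.) carry no child data.
        if (data == null) {
            return;
        }
        switch (event.getType()) {
            case CHILD_ADDED:
                System.out.println("added: " + data.getPath());
                break;
            case CHILD_UPDATED:
                System.out.println("updated: " + data.getPath());
                break;
            case CHILD_REMOVED:
                System.out.println("removed: " + data.getPath());
                break;
            default:
                break;
        }
    }
}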
Use of org.apache.flink.shaded.curator5.org.apache.curator.framework.recipes.cache.PathChildrenCache in project BRFS by zhangnianli.
The class TestZKNode, method main.
public static void main(String[] args) throws Exception {
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    CuratorFramework client = CuratorFrameworkFactory.newClient(zk_address, 5 * 1000, 30 * 1000, retryPolicy);
    client.start();
    client.blockUntilConnected();
    PathChildrenCache cache = new PathChildrenCache(client.usingNamespace("test"), "/fileCoordinator/big", true);
    cache.getListenable().addListener(new PathChildrenCacheListener() {
        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            System.out.println("---" + event);
            ChildData data = event.getData();
            if (data != null) {
                switch (event.getType()) {
                    case CHILD_ADDED:
                        System.out.println("###PATH-" + data.getPath());
                        break;
                    default:
                        break;
                }
            }
        }
    });
    cache.start();
    PersistentNode node = new PersistentNode(client.usingNamespace("test"), CreateMode.EPHEMERAL, false, "/fileCoordinator/temp-1", "node1".getBytes());
    node.getListenable().addListener(new PersistentNodeListener() {
        @Override
        public void nodeCreated(String path) throws Exception {
            System.out.println("node1--created:" + path);
        }
    });
    node.start();
    PersistentNode node2 = new PersistentNode(client.usingNamespace("test"), CreateMode.EPHEMERAL, false, "/fileCoordinator/temp-1", "node2".getBytes());
    node2.getListenable().addListener(new PersistentNodeListener() {
        @Override
        public void nodeCreated(String path) throws Exception {
            System.out.println("node2--created:" + path);
        }
    });
    node2.start();
    Thread.sleep(2000);
    node2.close();
    synchronized (node) {
        node.wait();
    }
}
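Both PersistentNode instances above learn about creation through a PersistentNodeListener callback. When the caller simply needs to block until the znode exists, PersistentNode also offers waitForInitialCreate. A minimal standalone sketch of that variant; the connect string and path are placeholders rather than the BRFS values:

import java.util.concurrent.TimeUnit;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.nodes.PersistentNode;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;

public class PersistentNodeSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        client.blockUntilConnected();
        PersistentNode node = new PersistentNode(client, CreateMode.EPHEMERAL, false, "/demo/temp-1", "payload".getBytes());
        node.start();
        // Block up to 10 seconds for the initial create instead of relying on a listener.
        if (node.waitForInitialCreate(10, TimeUnit.SECONDS)) {
            System.out.println("created: " + node.getActualPath());
        }
        node.close();
        client.close();
    }
}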
Use of org.apache.flink.shaded.curator5.org.apache.curator.framework.recipes.cache.PathChildrenCache in project coprhd-controller by CoprHD.
The class DistributedDataManagerImpl, method ensureCacheStarted.
/**
 * Use the double-check algorithm to initialize the child path cache for use in limit checking
 */
private void ensureCacheStarted() {
    if (_basePathCache == null) {
        synchronized (this) {
            if (_basePathCache == null) {
                try {
                    _basePathCache = new PathChildrenCache(_zkClient, _basePath, false);
                    _basePathCache.start();
                } catch (Exception ex) {
                    _basePathCache = null;
                    _log.error(String.format("%s: error initializing cache; will re-attempt", _basePath), ex);
                }
            }
        }
    }
}
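The Javadoc above names the double-check algorithm; in Java that idiom is only safe when the field being checked is declared volatile, otherwise the unsynchronized first read can observe a not-yet-fully-published object. A minimal sketch of the volatile variant, using hypothetical class and field names rather than the CoprHD ones:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;

// Hypothetical holder illustrating double-checked locking with a volatile field.
public class LazyPathCache {
    private final CuratorFramework client;
    private final String basePath;
    private volatile PathChildrenCache cache;   // volatile makes the unsynchronized read safe

    public LazyPathCache(CuratorFramework client, String basePath) {
        this.client = client;
        this.basePath = basePath;
    }

    public PathChildrenCache get() throws Exception {
        PathChildrenCache local = cache;
        if (local == null) {
            synchronized (this) {
                local = cache;
                if (local == null) {
                    local = new PathChildrenCache(client, basePath, false);
                    local.start();   // default NORMAL mode: children load in the background
                    cache = local;   // publish only after the cache has started successfully
                }
            }
        }
        return local;
    }
}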