use of org.apache.flink.shaded.curator5.org.apache.curator.framework.CuratorFramework in project Saturn by vipshop.
the class DashboardServiceImpl method refreshStatistics2DB.
private void refreshStatistics2DB(ZkCluster zkCluster) {
// key: {jobname}-{domain}
HashMap<String, JobStatistics> jobMap = new HashMap<>();
// key: {executorName}-{domain}
HashMap<String, ExecutorStatistics> executorMap = new HashMap<>();
List<JobStatistics> jobList = new ArrayList<>();
List<ExecutorStatistics> executorList = new ArrayList<>();
List<AbnormalJob> unnormalJobList = new ArrayList<>();
List<AbnormalJob> unableFailoverJobList = new ArrayList<>();
List<Timeout4AlarmJob> timeout4AlarmJobList = new ArrayList<>();
List<DomainStatistics> domainList = new ArrayList<>();
List<AbnormalContainer> abnormalContainerList = new ArrayList<>();
// number of domains per version
Map<String, Long> versionDomainNumber = new HashMap<>();
// number of executors per version
Map<String, Long> versionExecutorNumber = new HashMap<>();
int exeInDocker = 0;
int exeNotInDocker = 0;
int totalCount = 0;
int errorCount = 0;
for (RegistryCenterConfiguration config : zkCluster.getRegCenterConfList()) {
// skip registry center configurations that do not belong to the current ZK connection
if (zkCluster.getZkAddr().equals(config.getZkAddressList())) {
int processCountOfThisDomainAllTime = 0;
int errorCountOfThisDomainAllTime = 0;
int processCountOfThisDomainThisDay = 0;
int errorCountOfThisDomainThisDay = 0;
DomainStatistics domain = new DomainStatistics(config.getNamespace(), zkCluster.getZkAddr(), config.getNameAndNamespace());
RegistryCenterClient registryCenterClient = registryCenterService.connect(config.getNameAndNamespace());
try {
if (registryCenterClient != null && registryCenterClient.isConnected()) {
CuratorFramework curatorClient = registryCenterClient.getCuratorClient();
CuratorFrameworkOp curatorFrameworkOp = curatorRepository.newCuratorFrameworkOp(curatorClient);
// stability statistics
if (checkExists(curatorClient, ExecutorNodePath.SHARDING_COUNT_PATH)) {
String countStr = getData(curatorClient, ExecutorNodePath.SHARDING_COUNT_PATH);
domain.setShardingCount(Integer.valueOf(countStr));
}
// version of this domain
String version = null;
// number of online executors in this domain
long executorNumber = 0L;
// collect physical/container resource statistics and version data
if (null != curatorClient.checkExists().forPath(ExecutorNodePath.getExecutorNodePath())) {
List<String> executors = curatorClient.getChildren().forPath(ExecutorNodePath.getExecutorNodePath());
if (executors != null) {
for (String exe : executors) {
// only count executors that are online
if (null != curatorClient.checkExists().forPath(ExecutorNodePath.getExecutorIpNodePath(exe))) {
// record whether the executor runs on a physical machine or in a container
String executorMapKey = exe + "-" + config.getNamespace();
ExecutorStatistics executorStatistics = executorMap.get(executorMapKey);
if (executorStatistics == null) {
executorStatistics = new ExecutorStatistics(exe, config.getNamespace());
executorStatistics.setNns(domain.getNns());
executorStatistics.setIp(getData(curatorClient, ExecutorNodePath.getExecutorIpNodePath(exe)));
executorMap.put(executorMapKey, executorStatistics);
}
// set runInDocker field
if (checkExists(curatorClient, ExecutorNodePath.get$ExecutorTaskNodePath(exe))) {
executorStatistics.setRunInDocker(true);
exeInDocker++;
} else {
exeNotInDocker++;
}
}
// read the version
if (version == null) {
version = getData(curatorClient, ExecutorNodePath.getExecutorVersionNodePath(exe));
}
}
executorNumber = executors.size();
}
}
// aggregate version statistics
if (version == null) {
// unknown version
version = "-1";
}
if (versionDomainNumber.containsKey(version)) {
Long domainNumber = versionDomainNumber.get(version);
versionDomainNumber.put(version, domainNumber + 1);
} else {
versionDomainNumber.put(version, 1L);
}
if (versionExecutorNumber.containsKey(version)) {
Long executorNumber0 = versionExecutorNumber.get(version);
versionExecutorNumber.put(version, executorNumber0 + executorNumber);
} else {
if (executorNumber != 0) {
versionExecutorNumber.put(version, executorNumber);
}
}
// iterate over all $Jobs child nodes, excluding system jobs
List<String> jobs = jobDimensionService.getAllUnSystemJobs(curatorFrameworkOp);
SaturnStatistics saturnStatistics = saturnStatisticsService.findStatisticsByNameAndZkList(StatisticsTableKeyConstant.UNNORMAL_JOB, zkCluster.getZkAddr());
List<AbnormalJob> oldAbnormalJobs = new ArrayList<>();
if (saturnStatistics != null) {
String result = saturnStatistics.getResult();
if (StringUtils.isNotBlank(result)) {
oldAbnormalJobs = JSON.parseArray(result, AbnormalJob.class);
}
}
saturnStatistics = saturnStatisticsService.findStatisticsByNameAndZkList(StatisticsTableKeyConstant.TIMEOUT_4_ALARM_JOB, zkCluster.getZkAddr());
List<Timeout4AlarmJob> oldTimeout4AlarmJobs = new ArrayList<>();
if (saturnStatistics != null) {
String result = saturnStatistics.getResult();
if (StringUtils.isNotBlank(result)) {
oldTimeout4AlarmJobs = JSON.parseArray(result, Timeout4AlarmJob.class);
}
}
for (String job : jobs) {
try {
Boolean localMode = Boolean.valueOf(getData(curatorClient, JobNodePath.getConfigNodePath(job, "localMode")));
String jobDomainKey = job + "-" + config.getNamespace();
JobStatistics jobStatistics = jobMap.get(jobDomainKey);
if (jobStatistics == null) {
jobStatistics = new JobStatistics(job, config.getNamespace(), config.getNameAndNamespace());
jobMap.put(jobDomainKey, jobStatistics);
}
String jobDegree = getData(curatorClient, JobNodePath.getConfigNodePath(job, "jobDegree"));
if (Strings.isNullOrEmpty(jobDegree)) {
jobDegree = "0";
}
jobStatistics.setJobDegree(Integer.parseInt(jobDegree));
// only non-local-mode jobs take part in the abnormal-job check
if (!localMode) {
AbnormalJob unnormalJob = new AbnormalJob(job, config.getNamespace(), config.getNameAndNamespace(), config.getDegree());
checkJavaOrShellJobHasProblem(oldAbnormalJobs, curatorClient, unnormalJob, jobDegree, unnormalJobList);
}
// find jobs that should raise a timeout alarm
Timeout4AlarmJob timeout4AlarmJob = new Timeout4AlarmJob(job, config.getNamespace(), config.getNameAndNamespace(), config.getDegree());
if (isTimeout4AlarmJob(oldTimeout4AlarmJobs, timeout4AlarmJob, curatorFrameworkOp) != null) {
timeout4AlarmJob.setJobDegree(jobDegree);
timeout4AlarmJobList.add(timeout4AlarmJob);
}
// find jobs that cannot fail over
AbnormalJob unableFailoverJob = new AbnormalJob(job, config.getNamespace(), config.getNameAndNamespace(), config.getDegree());
if (isUnableFailoverJob(curatorClient, unableFailoverJob, curatorFrameworkOp) != null) {
unableFailoverJob.setJobDegree(jobDegree);
unableFailoverJobList.add(unableFailoverJob);
}
String processCountOfThisJobAllTimeStr = getData(curatorClient, JobNodePath.getProcessCountPath(job));
String errorCountOfThisJobAllTimeStr = getData(curatorClient, JobNodePath.getErrorCountPath(job));
int processCountOfThisJobAllTime = processCountOfThisJobAllTimeStr == null ? 0 : Integer.valueOf(processCountOfThisJobAllTimeStr);
int errorCountOfThisJobAllTime = errorCountOfThisJobAllTimeStr == null ? 0 : Integer.valueOf(errorCountOfThisJobAllTimeStr);
processCountOfThisDomainAllTime += processCountOfThisJobAllTime;
errorCountOfThisDomainAllTime += errorCountOfThisJobAllTime;
int processCountOfThisJobThisDay = 0;
int errorCountOfThisJobThisDay = 0;
// loadLevel of this job
int loadLevel = Integer.parseInt(getData(curatorClient, JobNodePath.getConfigNodePath(job, "loadLevel")));
int shardingTotalCount = Integer.parseInt(getData(curatorClient, JobNodePath.getConfigNodePath(job, "shardingTotalCount")));
List<String> servers = null;
if (null != curatorClient.checkExists().forPath(JobNodePath.getServerNodePath(job))) {
servers = curatorClient.getChildren().forPath(JobNodePath.getServerNodePath(job));
for (String server : servers) {
// 1. use processSuccessCount/processFailureCount to count the job's executions for the day; 2. accumulate the executor's loadLevel
if (checkExists(curatorClient, JobNodePath.getServerStatus(job, server))) {
// 1. count the job's executions for the day from processSuccessCount/processFailureCount
try {
String processSuccessCountOfThisExeStr = getData(curatorClient, JobNodePath.getProcessSucessCount(job, server));
String processFailureCountOfThisExeStr = getData(curatorClient, JobNodePath.getProcessFailureCount(job, server));
int processSuccessCountOfThisExe = processSuccessCountOfThisExeStr == null ? 0 : Integer.valueOf(processSuccessCountOfThisExeStr);
int processFailureCountOfThisExe = processFailureCountOfThisExeStr == null ? 0 : Integer.valueOf(processFailureCountOfThisExeStr);
// today's execution statistics for this job
processCountOfThisJobThisDay += processSuccessCountOfThisExe + processFailureCountOfThisExe;
errorCountOfThisJobThisDay += processFailureCountOfThisExe;
// today's success and failure counts across all domains
totalCount += processSuccessCountOfThisExe + processFailureCountOfThisExe;
errorCount += processFailureCountOfThisExe;
// today's execution statistics for the whole domain
processCountOfThisDomainThisDay += processCountOfThisJobThisDay;
errorCountOfThisDomainThisDay += errorCountOfThisJobThisDay;
// today's success and failure counts for this executor
String executorMapKey = server + "-" + config.getNamespace();
ExecutorStatistics executorStatistics = executorMap.get(executorMapKey);
if (executorStatistics == null) {
executorStatistics = new ExecutorStatistics(server, config.getNamespace());
executorStatistics.setNns(domain.getNns());
executorStatistics.setIp(getData(curatorClient, ExecutorNodePath.getExecutorIpNodePath(server)));
executorMap.put(executorMapKey, executorStatistics);
}
executorStatistics.setFailureCountOfTheDay(executorStatistics.getFailureCountOfTheDay() + processFailureCountOfThisExe);
executorStatistics.setProcessCountOfTheDay(executorStatistics.getProcessCountOfTheDay() + processSuccessCountOfThisExe + processFailureCountOfThisExe);
} catch (Exception e) {
log.info(e.getMessage());
}
// 2. accumulate the executor's loadLevel
try {
// only enabled jobs contribute to the load calculation
if (Boolean.valueOf(getData(curatorClient, JobNodePath.getConfigNodePath(job, "enabled")))) {
String sharding = getData(curatorClient, JobNodePath.getServerSharding(job, server));
if (StringUtils.isNotEmpty(sharding)) {
// update the job's executorsAndShards
String exesAndShards = (jobStatistics.getExecutorsAndShards() == null ? "" : jobStatistics.getExecutorsAndShards()) + server + ":" + sharding + "; ";
jobStatistics.setExecutorsAndShards(exesAndShards);
// 2. record whether the executor runs on a physical machine or in a container
String executorMapKey = server + "-" + config.getNamespace();
ExecutorStatistics executorStatistics = executorMap.get(executorMapKey);
if (executorStatistics == null) {
executorStatistics = new ExecutorStatistics(server, config.getNamespace());
executorStatistics.setNns(domain.getNns());
executorStatistics.setIp(getData(curatorClient, ExecutorNodePath.getExecutorIpNodePath(server)));
executorMap.put(executorMapKey, executorStatistics);
// set runInDocker field
if (checkExists(curatorClient, ExecutorNodePath.get$ExecutorTaskNodePath(server))) {
executorStatistics.setRunInDocker(true);
exeInDocker++;
} else {
exeNotInDocker++;
}
}
if (executorStatistics.getJobAndShardings() != null) {
executorStatistics.setJobAndShardings(executorStatistics.getJobAndShardings() + job + ":" + sharding + ";");
} else {
executorStatistics.setJobAndShardings(job + ":" + sharding + ";");
}
int newLoad = executorStatistics.getLoadLevel() + (loadLevel * sharding.split(",").length);
executorStatistics.setLoadLevel(newLoad);
}
}
} catch (Exception e) {
log.info(e.getMessage());
}
}
}
}
// for a local-mode job, total load = loadLevel * server count (regardless of server status)
if (localMode) {
jobStatistics.setTotalLoadLevel(servers == null ? 0 : (servers.size() * loadLevel));
} else {
jobStatistics.setTotalLoadLevel(loadLevel * shardingTotalCount);
}
jobStatistics.setErrorCountOfAllTime(errorCountOfThisJobAllTime);
jobStatistics.setProcessCountOfAllTime(processCountOfThisJobAllTime);
jobStatistics.setFailureCountOfTheDay(errorCountOfThisJobThisDay);
jobStatistics.setProcessCountOfTheDay(processCountOfThisJobThisDay);
jobMap.put(jobDomainKey, jobStatistics);
} catch (Exception e) {
log.info("statistics namespace:{} ,jobName:{} ,exception:{}", domain.getNns(), job, e.getMessage());
}
}
// iterate over container resources and collect the abnormal ones
String dcosTasksNodePath = ContainerNodePath.getDcosTasksNodePath();
List<String> tasks = curatorFrameworkOp.getChildren(dcosTasksNodePath);
if (tasks != null && !tasks.isEmpty()) {
for (String taskId : tasks) {
AbnormalContainer abnormalContainer = new AbnormalContainer(taskId, config.getNamespace(), config.getNameAndNamespace(), config.getDegree());
if (isContainerInstanceMismatch(abnormalContainer, curatorFrameworkOp) != null) {
abnormalContainerList.add(abnormalContainer);
}
}
}
}
} catch (Exception e) {
log.info("refreshStatistics2DB namespace:{} ,exception:{}", domain.getNns(), e.getMessage());
}
domain.setErrorCountOfAllTime(errorCountOfThisDomainAllTime);
domain.setProcessCountOfAllTime(processCountOfThisDomainAllTime);
domain.setErrorCountOfTheDay(errorCountOfThisDomainThisDay);
domain.setProcessCountOfTheDay(processCountOfThisDomainThisDay);
domainList.add(domain);
}
}
jobList.addAll(jobMap.values());
executorList.addAll(executorMap.values());
// today's total processed count and failure count across all domains
saveOrUpdateDomainProcessCount(new ZkStatistics(totalCount, errorCount), zkCluster.getZkAddr());
// top 10 domains by failure rate
saveOrUpdateTop10FailDomain(domainList, zkCluster.getZkAddr());
// top 10 least stable domains
saveOrUpdateTop10UnstableDomain(domainList, zkCluster.getZkAddr());
// top 10 least stable executors
saveOrUpdateTop10FailExecutor(executorList, zkCluster.getZkAddr());
// top 10 jobs by failure rate
saveOrUpdateTop10FailJob(jobList, zkCluster.getZkAddr());
// top 10 most active jobs (the jobs executed most often today)
saveOrUpdateTop10ActiveJob(jobList, zkCluster.getZkAddr());
// top 10 most heavily loaded jobs
saveOrUpdateTop10LoadJob(jobList, zkCluster.getZkAddr());
// top 10 most heavily loaded executors
saveOrUpdateTop10LoadExecutor(executorList, zkCluster.getZkAddr());
// abnormal jobs (e.g. the next fire time has passed but the job was not triggered)
saveOrUpdateAbnormalJob(unnormalJobList, zkCluster.getZkAddr());
// jobs with timeout alarms
saveOrUpdateTimeout4AlarmJob(timeout4AlarmJobList, zkCluster.getZkAddr());
// jobs that cannot fail over
saveOrUpdateUnableFailoverJob(unableFailoverJobList, zkCluster.getZkAddr());
// abnormal container resources, including resources whose instance count does not match
saveOrUpdateAbnormalContainer(abnormalContainerList, zkCluster.getZkAddr());
// number of domains per version
saveOrUpdateVersionDomainNumber(versionDomainNumber, zkCluster.getZkAddr());
// number of executors per version
saveOrUpdateVersionExecutorNumber(versionExecutorNumber, zkCluster.getZkAddr());
// number of jobs per job rank
saveOrUpdateJobRankDistribution(jobList, zkCluster.getZkAddr());
// number of executors running in containers
saveOrUpdateExecutorInDockerCount(exeInDocker, zkCluster.getZkAddr());
// number of executors running on physical machines
saveOrUpdateExecutorNotInDockerCount(exeNotInDocker, zkCluster.getZkAddr());
// total number of jobs
saveOrUpdateJobCount(jobList.size(), zkCluster.getZkAddr());
}
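The method above reads ZooKeeper through small checkExists/getData helpers instead of repeating the fluent Curator calls and try/catch at every site. Those helpers are not shown on this page; a minimal sketch of what they might look like, assuming they simply treat a missing node or a failed read as false/null (an assumption, not the project's verified implementation):
import java.nio.charset.StandardCharsets;
import org.apache.curator.framework.CuratorFramework;

// Hypothetical helpers in the spirit of DashboardServiceImpl.checkExists/getData.
final class CuratorReadHelper {

    // true if the node exists, false if it is missing or the check fails
    static boolean checkExists(CuratorFramework client, String path) {
        try {
            return client.checkExists().forPath(path) != null;
        } catch (Exception e) {
            return false;
        }
    }

    // node payload as a UTF-8 string, or null if the node is missing or the read fails
    static String getData(CuratorFramework client, String path) {
        try {
            byte[] bytes = client.getData().forPath(path);
            return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
        } catch (Exception e) {
            return null;
        }
    }

    private CuratorReadHelper() {
    }
}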
use of org.apache.flink.shaded.curator5.org.apache.curator.framework.CuratorFramework in project Saturn by vipshop.
the class NamespaceZkClusterMappingServiceImpl method moveNamespaceTo.
@Transactional(rollbackFor = { SaturnJobConsoleException.class })
@Override
public void moveNamespaceTo(String namespace, String zkClusterKeyNew, String lastUpdatedBy, boolean updateDBOnly) throws SaturnJobConsoleException {
try {
log.info("start move {} to {}", namespace, zkClusterKeyNew);
if (updateDBOnly) {
namespaceZkclusterMapping4SqlService.update(namespace, null, zkClusterKeyNew, lastUpdatedBy);
} else {
String zkClusterKey = namespaceZkclusterMapping4SqlService.getZkClusterKey(namespace);
if (zkClusterKey != null && zkClusterKey.equals(zkClusterKeyNew)) {
// see moveNamespaceBatchTo before modify
throw new SaturnJobConsoleException("The namespace(" + namespace + ") is in " + zkClusterKey);
}
ZkCluster zkCluster = registryCenterService.getZkCluster(zkClusterKeyNew);
if (zkCluster == null) {
throw new SaturnJobConsoleException("The " + zkClusterKeyNew + " is not exists");
}
if (zkCluster.isOffline()) {
throw new SaturnJobConsoleException("The " + zkClusterKeyNew + " zkCluster is offline");
}
String zkAddr = zkCluster.getZkAddr();
CuratorRepository.CuratorFrameworkOp curatorFrameworkOp = registryCenterService.connectOnly(zkAddr, null);
if (curatorFrameworkOp == null) {
throw new SaturnJobConsoleException("The " + zkClusterKeyNew + " zkCluster is offline");
}
CuratorFramework curatorFramework = curatorFrameworkOp.getCuratorFramework();
CuratorRepository.CuratorFrameworkOp curatorFrameworkOpByNamespace = registryCenterService.connectOnly(zkAddr, namespace);
CuratorFramework curatorFrameworkByNamespace = curatorFrameworkOpByNamespace.getCuratorFramework();
try {
String namespaceNodePath = "/" + namespace;
if (curatorFramework.checkExists().forPath(namespaceNodePath) != null) {
curatorFramework.delete().deletingChildrenIfNeeded().forPath(namespaceNodePath);
}
String jobsNodePath = namespaceNodePath + JobNodePath.get$JobsNodePath();
curatorFramework.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(jobsNodePath);
List<CurrentJobConfig> configs = currentJobConfigService.findConfigsByNamespace(namespace);
log.info("get configs success, {}", namespace);
if (configs != null) {
for (CurrentJobConfig jobConfig : configs) {
jobOperationService.persistJobFromDB(jobConfig, curatorFrameworkOpByNamespace);
log.info("move {}-{} to zk success", namespace, jobConfig.getJobName());
}
}
} finally {
curatorFramework.close();
curatorFrameworkByNamespace.close();
}
log.info("move {} to zk {} success", namespace, zkClusterKeyNew);
namespaceZkclusterMapping4SqlService.update(namespace, null, zkClusterKeyNew, lastUpdatedBy);
log.info("update mapping table success, {}-{}", namespace, zkClusterKeyNew);
}
} catch (SaturnJobConsoleException e) {
log.error(e.getMessage(), e);
throw e;
} catch (Exception e) {
log.error(e.getMessage(), e);
throw new SaturnJobConsoleException(e);
} finally {
log.info("end move {} to {}", namespace, zkClusterKeyNew);
}
}
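The ZooKeeper part of moveNamespaceTo boils down to: delete the namespace subtree in the target cluster if it already exists, then recreate the $Jobs parent node before the job configurations are written back. A standalone sketch of that check-delete-create pattern against a plain CuratorFramework (the connection string and namespace are placeholders, not Saturn's configuration):
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.CreateMode;

public class NamespaceResetSketch {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        try {
            String namespaceNodePath = "/mydomain"; // placeholder namespace
            String jobsNodePath = namespaceNodePath + "/$Jobs";
            // wipe the whole namespace subtree if it already exists
            if (client.checkExists().forPath(namespaceNodePath) != null) {
                client.delete().deletingChildrenIfNeeded().forPath(namespaceNodePath);
            }
            // recreate the $Jobs parent as a persistent node
            client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(jobsNodePath);
        } finally {
            client.close();
        }
    }
}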
use of org.apache.flink.shaded.curator5.org.apache.curator.framework.CuratorFramework in project Saturn by vipshop.
the class RegistryCenterServiceImpl method initMoveInNamespace.
private void initMoveInNamespace(List<String> allOnlineNamespacesTemp, String zkClusterKey, ZkCluster zkCluster, List<NamespaceZkClusterMapping> nsZkClusterMappingList, List<RegistryCenterConfiguration> regCenterConfList) {
if (nsZkClusterMappingList == null || zkCluster.isOffline()) {
return;
}
for (NamespaceZkClusterMapping mapping : nsZkClusterMappingList) {
String namespace = mapping.getNamespace();
String name = StringUtils.deleteWhitespace(mapping.getName());
if (SaturnSelfNodePath.ROOT_NAME.equals(namespace)) {
log.error("The namespace cannot be {}", SaturnSelfNodePath.ROOT_NAME);
continue;
}
boolean include = false;
if (regCenterConfList != null) {
for (RegistryCenterConfiguration conf : regCenterConfList) {
if (!namespace.equals(conf.getNamespace())) {
continue;
}
include = true;
String nnsOld = conf.getNameAndNamespace();
// update name
conf.setName(name);
conf.initNameAndNamespace();
String nnsNew = conf.getNameAndNamespace();
if (!nnsOld.equals(nnsNew)) {
synchronized (getNnsLock(nnsOld)) {
closeNamespace(nnsOld);
log.info("closed the namespace info because it's nns is changed, namespace is {}", namespace);
}
}
break;
}
}
if (!include) {
CuratorFramework curatorFramework = zkCluster.getCuratorFramework();
initNamespaceZkNodeIfNecessary(namespace, curatorFramework);
RegistryCenterConfiguration conf = new RegistryCenterConfiguration(name, namespace, zkCluster.getZkAddr());
conf.setZkClusterKey(zkClusterKey);
conf.setVersion(getVersion(namespace, curatorFramework));
conf.setZkAlias(zkCluster.getZkAlias());
NamespaceInfo namespaceInfo = getNamespaceInfo(namespace);
if (namespaceInfo != null) {
postConstructRegistryCenterConfiguration(conf, namespaceInfo.getContent());
}
zkCluster.getRegCenterConfList().add(conf);
}
if (!allOnlineNamespacesTemp.contains(namespace)) {
allOnlineNamespacesTemp.add(namespace);
}
}
}
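initNamespaceZkNodeIfNecessary and getVersion are called above but not shown on this page. Assuming the former only has to guarantee that the namespace root node exists before the RegistryCenterConfiguration is registered, a hedged sketch of such a check-and-create helper could look like this (the node layout is an assumption, not Saturn's verified one):
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.CreateMode;

// Hypothetical helper: make sure "/<namespace>" exists as a persistent node.
final class NamespaceNodeInit {

    static void initNamespaceZkNodeIfNecessary(String namespace, CuratorFramework curatorFramework) {
        String namespaceNodePath = "/" + namespace;
        try {
            if (curatorFramework.checkExists().forPath(namespaceNodePath) == null) {
                curatorFramework.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(namespaceNodePath);
            }
        } catch (Exception e) {
            // the sketch only reports the failure; the real service may log and continue
            e.printStackTrace();
        }
    }

    private NamespaceNodeInit() {
    }
}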
use of org.apache.flink.shaded.curator5.org.apache.curator.framework.CuratorFramework in project BRFS by zhangnianli.
the class CuratorZookeeperClientTest method testCuratorListener.
// public void testGetClientInstance() throws Exception {
// CuratorZookeeperClient client = CuratorZookeeperClient.getClientInstance(zkUrl, retry, sessionTimeoutMs, connectionTimeoutMs, isWaitConnection);
// assertNotNull(client);
// client.close();
// }
//
// public void testCurd() throws Exception {
// boolean flag = false;
// CuratorZookeeperClient client = CuratorZookeeperClient.getClientInstance(zkUrl);
//
// client.createEphemeral("/brfs/wz/test/createEphemeral", true);
// flag = client.checkExists("/brfs/wz/test/createEphemeral");
// assertEquals(flag, true);
//
// client.createPersistent("/brfs/wz/test/createPersistent", true);
// flag = client.checkExists("/brfs/wz/test/createPersistent");
// assertEquals(flag, true);
//
// client.guaranteedDelete("/brfs/wz/test/createPersistent", false);
// flag = client.checkExists("/brfs/wz/test/createPersistent");
// assertEquals(flag, false);
//
// client.setData("/brfs/wz/test/createEphemeral", "createEphemeral".getBytes());
//
// assertEquals("createEphemeral", new String(client.getData("/brfs/wz/test/createEphemeral")));
//
// client.close();
// }
//
// public void testWatcher() throws Exception {
//
// ExecutorService serverThreads = Executors.newFixedThreadPool(10);
// final CuratorZookeeperClient client = CuratorZookeeperClient.getClientInstance(zkUrl);
// if (!client.checkExists("/brfs/wz/servers")) {
// client.createPersistent("/brfs/wz/servers", true);
// }
// MyWatcher watcher = new MyWatcher(client);
// System.out.println(client.watchedGetChildren("/brfs/wz/servers", watcher));
//
// for (int i = 0; i < 10; i++) {
// final int count = i;
// serverThreads.execute(new Runnable() {
// //
// @Override
// public void run() {
// synchronized (client) {
// client.createEphemeral("/brfs/wz/servers/server" + count, true);
// }
// }
// });
// }
// serverThreads.shutdown();
// serverThreads.awaitTermination(1, TimeUnit.DAYS);
// client.close();
// }
//
// public class MyWatcher implements Watcher {
//
// private final CuratorZookeeperClient client;
//
// public MyWatcher(CuratorZookeeperClient client) {
//
// this.client = client;
// }
//
// @Override
// public void process(WatchedEvent event) {
// if (event.getType() == EventType.NodeChildrenChanged) {
// List<String> tmps = client.watchedGetChildren(event.getPath(), this);
// System.out.println(1111);
// System.out.println(tmps);
// }
// }
//
// }
public void testCuratorListener() throws Exception {
final CuratorClient client = CuratorClient.getClientInstance(zkUrl);
CuratorFramework curatorClient = client.getInnerClient();
// curatorClient.getChildren().inBackground(new BackgroundCallback() {
//
// @Override
// public void processResult(CuratorFramework client, CuratorEvent event) throws Exception {
// System.out.println("aaaaa" + event.getChildren());
// }
// }).forPath("/brfs/wz");
curatorClient.getCuratorListenable().addListener(new CuratorListener() {
@Override
public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
System.out.println("CuratorListener1--" + event.getPath() + "--" + event.getWatchedEvent());
}
});
curatorClient.getCuratorListenable().addListener(new CuratorListener() {
@Override
public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
System.out.println("CuratorListener2--" + event.getPath() + "--" + event.getWatchedEvent());
}
});
curatorClient.setData().inBackground().forPath("/yupeng/yupeng/yupeng", "aaa".getBytes());
Thread.sleep(2000);
client.close();
}
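The commented-out fragment at the top of the test hints at Curator's background API. For comparison, a minimal sketch of fetching children with a BackgroundCallback on a plain CuratorFramework, independent of the BRFS CuratorClient wrapper (connection string and path are placeholders):
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CuratorEvent;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class BackgroundChildrenSketch {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        // the callback runs on a Curator internal thread once the children have been fetched
        BackgroundCallback callback = (CuratorFramework c, CuratorEvent event) -> System.out.println("children of " + event.getPath() + ": " + event.getChildren());
        client.getChildren().inBackground(callback).forPath("/brfs/wz"); // placeholder path
        Thread.sleep(2000); // give the background call time to complete before closing
        client.close();
    }
}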
use of org.apache.flink.shaded.curator5.org.apache.curator.framework.CuratorFramework in project weicoder by wdcode.
the class ZookeeperClient method getDataAsync.
/**
* Asynchronously reads the data of a ZK node and automatically re-registers the Watcher.
* @param path node path
* @param callback callback invoked with the node data
*/
public static void getDataAsync(final String path, final Callback callback) {
// callback for the asynchronous data read
final BackgroundCallback background = (CuratorFramework client, CuratorEvent event) -> {
try {
callback.callBack(event.getData());
} catch (Exception e) {
Logs.error(e);
}
};
// every time a ZK event is received, re-register the Watcher and then read the data asynchronously
final Watcher watcher = new Watcher() {
@Override
public void process(WatchedEvent event) {
if (event.getType() == Event.EventType.NodeDataChanged) {
try {
curatorFramework.getData().usingWatcher(this).inBackground(background).forPath(path);
} catch (Exception e) {
Logs.error(e);
}
}
}
};
// register the Watcher for the first time and read the data asynchronously
try {
curatorFramework.getData().usingWatcher(watcher).inBackground(background).forPath(path);
} catch (Exception e) {
Logs.error(e);
}
}
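A usage sketch of getDataAsync: register once and the callback keeps firing on every NodeDataChanged event, because the method re-registers the Watcher itself. The Callback type is assumed here to be a single-method interface receiving the node's raw byte[] payload, which is a guess at the weicoder API rather than a confirmed signature:
// Hypothetical caller; "/config/app" is a placeholder path and the lambda shape of Callback is assumed.
ZookeeperClient.getDataAsync("/config/app", data -> {
    String value = data == null ? null : new String(data, java.nio.charset.StandardCharsets.UTF_8);
    System.out.println("node data changed: " + value);
});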