use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project xian by happyyangyuan.
the class PathCacheExample method addListener.
private static void addListener(PathChildrenCache cache) {
    // a PathChildrenCacheListener is optional. Here, it's used just to log changes
    PathChildrenCacheListener listener = new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            switch (event.getType()) {
                case CHILD_ADDED: {
                    System.out.println("Node added: " + ZKPaths.getNodeFromPath(event.getData().getPath()));
                    break;
                }
                case CHILD_UPDATED: {
                    System.out.println("Node changed: " + ZKPaths.getNodeFromPath(event.getData().getPath()));
                    break;
                }
                case CHILD_REMOVED: {
                    System.out.println("Node removed: " + ZKPaths.getNodeFromPath(event.getData().getPath()));
                    break;
                }
            }
        }
    };
    cache.getListenable().addListener(listener);
}
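The helper above only registers the listener; the cache itself still has to be created and started against a running client. A minimal wiring sketch, assuming a local ZooKeeper at 127.0.0.1:2181 and an example path (both placeholders, not values from the xian project):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.CloseableUtils;

public class PathCacheWiringSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "127.0.0.1:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        // cacheData = true so event.getData().getData() carries the node payload
        PathChildrenCache cache = new PathChildrenCache(client, "/example/cache", true);
        // same registration the addListener(...) helper above performs
        cache.getListenable().addListener((c, event) ->
                System.out.println(event.getType() + " " + event.getData()));
        cache.start();                   // default NORMAL start mode
        Thread.sleep(10_000);            // watch events while children change
        CloseableUtils.closeQuietly(cache);
        CloseableUtils.closeQuietly(client);
    }
}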
use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project turbo-rpc by hank-whu.
the class ZooKeeperDiscover method addListener.
@Override
public void addListener(String group, String app, Protocol protocol, final DiscoverListener listener) {
    Objects.requireNonNull(listener, "listener is null");
    Objects.requireNonNull(client, "call init first");
    final String path = "/turbo/" + group + "/" + app + "/" + protocol;
    final PathChildrenCache watcher = new PathChildrenCache(client, path, true);
    PathChildrenCacheListener pathChildrenCacheListener = new PathChildrenCacheListener() {

        private final ConcurrentMap<HostPort, Integer> serverWithWeight = new ConcurrentHashMap<>();

        private volatile boolean waitForInitializedEvent = true;

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            if (logger.isInfoEnabled()) {
                logger.info("watched ZooKeeper child list changed, " + path + ", " + event.getType());
            }
            boolean isChanged = true;
            switch (event.getType()) {
                case INITIALIZED:
                    waitForInitializedEvent = false;
                    if (logger.isInfoEnabled()) {
                        logger.info("initialization finished: " + path);
                    }
                    break;
                case CHILD_ADDED: {
                    AddressWithWeight kv = new AddressWithWeight(event.getData().getData());
                    serverWithWeight.put(kv.address, kv.weight);
                    if (logger.isInfoEnabled()) {
                        logger.info("node added: " + kv);
                    }
                    break;
                }
                case CHILD_REMOVED: {
                    AddressWithWeight kv = new AddressWithWeight(event.getData().getData());
                    serverWithWeight.remove(kv.address);
                    if (logger.isInfoEnabled()) {
                        logger.info("node removed: " + kv);
                    }
                    break;
                }
                case CHILD_UPDATED: {
                    AddressWithWeight kv = new AddressWithWeight(event.getData().getData());
                    serverWithWeight.put(kv.address, kv.weight);
                    if (logger.isInfoEnabled()) {
                        logger.info("node updated: " + kv);
                    }
                    break;
                }
                default:
                    isChanged = false;
                    if (logger.isInfoEnabled()) {
                        logger.info("ignored, " + path + ", " + event.getType());
                    }
            }
            if (!waitForInitializedEvent && isChanged) {
                try {
                    listener.onChange(serverWithWeight);
                } catch (Throwable t) {
                    if (logger.isWarnEnabled()) {
                        logger.warn("Discover listener failed to handle the change", t);
                    }
                }
            }
        }
    };
    watcher.getListenable().addListener(pathChildrenCacheListener);
    try {
        watcher.start(StartMode.POST_INITIALIZED_EVENT);
        watchers.add(watcher);
    } catch (Exception e) {
        if (logger.isErrorEnabled()) {
            logger.error("failed to watch ZooKeeper path, " + path, e);
        }
    }
}
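Each successful call leaves a started PathChildrenCache in the watchers collection, so those caches eventually need to be closed. A minimal cleanup sketch; only the watchers field comes from the snippet above, the destroy() hook is an assumption rather than turbo-rpc API:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.utils.CloseableUtils;

// Hypothetical shutdown hook for the watchers created above; the real
// ZooKeeperDiscover may clean up differently.
public class WatcherCleanupSketch {

    private final List<PathChildrenCache> watchers = new CopyOnWriteArrayList<>();

    public void destroy() {
        for (PathChildrenCache watcher : watchers) {
            // close() stops the cache and drops its listeners; closeQuietly
            // swallows the IOException so one failure does not stop the loop
            CloseableUtils.closeQuietly(watcher);
        }
        watchers.clear();
    }
}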
use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project dble by actiontech.
the class AbstractGeneralListener method childEvent.
@Override
public final void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
    switch (event.getType()) {
        case CHILD_ADDED:
        case CHILD_REMOVED:
        case CHILD_UPDATED:
            break;
        default:
            return;
    }
    logger.info("event happened in {}, path: {}, type: {}, data: {}", this.getClass().getSimpleName(),
            Optional.of(event).map(PathChildrenCacheEvent::getData).map(ChildData::getPath).orElse(null),
            event.getType(),
            Optional.of(event).map(PathChildrenCacheEvent::getData).map(ChildData::getData).map(data -> new String(data)).orElse(null));
    final ChildData data = event.getData();
    if (data == null) {
        return;
    }
    if (data.getData() == null) {
        logger.warn("ignore this empty event.{}", event);
        return;
    }
    final String strValue = new String(data.getData());
    if (Strings.isEmpty(strValue)) {
        logger.warn("ignore this empty event.{}", event);
        return;
    }
    final ClusterEvent<T> newEvent;
    ClusterEvent<T> oldEvent = null;
    final ClusterValue<T> newValue = ClusterValue.readFromJson(strValue, pathMeta.getChildClass());
    final String path = data.getPath();
    switch (event.getType()) {
        case CHILD_ADDED:
            newEvent = new ClusterEvent<>(path, newValue, ChangeType.ADDED);
            keyCacheMap.put(path, newValue);
            break;
        case CHILD_REMOVED:
            newEvent = new ClusterEvent<>(path, newValue, ChangeType.REMOVED);
            keyCacheMap.remove(path);
            break;
        case CHILD_UPDATED:
            /*
             * An update event is split into two events:
             * remove the old value and add the new one.
             */
            newEvent = new ClusterEvent<>(path, newValue, ChangeType.ADDED);
            newEvent.markUpdate();
            final ClusterValue<T> oldValue = keyCacheMap.get(path);
            if (oldValue == null) {
                logger.error("miss previous message for UPDATE");
            } else {
                oldEvent = new ClusterEvent<>(path, oldValue, ChangeType.REMOVED);
                oldEvent.markUpdate();
            }
            keyCacheMap.put(path, newValue);
            break;
        default:
            return;
    }
    if (oldEvent != null) {
        try {
            onEvent0(oldEvent);
        } catch (Exception e) {
            logger.info("", e);
        }
    }
    try {
        onEvent0(newEvent);
    } catch (Exception e) {
        logger.info("", e);
    }
}
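The interesting branch is CHILD_UPDATED: Curator only delivers the new data, so the listener keeps its own keyCacheMap and turns one update into a REMOVED event for the cached old value plus an ADDED event for the new one. A stand-alone sketch of the same split, using a plain map and placeholder handlers (onAdded/onRemoved are hypothetical names, not dble APIs), assuming the cache was created with cacheData = true:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;

public class SplitUpdateListener implements PathChildrenCacheListener {

    // last payload seen per child path, the equivalent of keyCacheMap above
    private final Map<String, byte[]> lastSeen = new ConcurrentHashMap<>();

    @Override
    public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
        if (event.getData() == null) {
            return;   // connection/INITIALIZED events carry no child data
        }
        String path = event.getData().getPath();
        byte[] value = event.getData().getData();
        switch (event.getType()) {
            case CHILD_ADDED:
                lastSeen.put(path, value);
                onAdded(path, value);
                break;
            case CHILD_REMOVED:
                lastSeen.remove(path);
                onRemoved(path, value);
                break;
            case CHILD_UPDATED: {
                // Curator only delivers the new data; the old value comes from
                // our own cache, so emit remove(old) followed by add(new)
                byte[] old = lastSeen.put(path, value);
                if (old != null) {
                    onRemoved(path, old);
                }
                onAdded(path, value);
                break;
            }
            default:
                break;
        }
    }

    protected void onAdded(String path, byte[] data) { /* placeholder */ }

    protected void onRemoved(String path, byte[] data) { /* placeholder */ }
}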
use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by apache.
the class CuratorDruidCoordinatorTest method testMoveSegment.
@Test
public void testMoveSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(4);
    segmentRemovedLatch = new CountDownLatch(0);
    CountDownLatch destCountdown = new CountDownLatch(1);
    CountDownLatch srcCountdown = new CountDownLatch(1);
    setupView();
    DruidServer source = new DruidServer("localhost:1", "localhost:1", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
    DruidServer dest = new DruidServer("localhost:2", "localhost:2", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
    setupZNodeForServer(source, zkPathsConfig, jsonMapper);
    setupZNodeForServer(dest, zkPathsConfig, jsonMapper);
    final List<DataSegment> sourceSegments = Lists.transform(
            ImmutableList.of(
                    Pair.of("2011-04-01/2011-04-03", "v1"),
                    Pair.of("2011-04-03/2011-04-06", "v1"),
                    Pair.of("2011-04-06/2011-04-09", "v1")),
            input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs));
    final List<DataSegment> destinationSegments = Lists.transform(
            ImmutableList.of(Pair.of("2011-03-31/2011-04-01", "v1")),
            input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs));
    DataSegment segmentToMove = sourceSegments.get(2);
    List<String> sourceSegKeys = new ArrayList<>();
    for (DataSegment segment : sourceSegments) {
        sourceSegKeys.add(announceBatchSegmentsForServer(source, ImmutableSet.of(segment), zkPathsConfig, jsonMapper));
    }
    for (DataSegment segment : destinationSegments) {
        announceBatchSegmentsForServer(dest, ImmutableSet.of(segment), zkPathsConfig, jsonMapper);
    }
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // these child watchers are used to simulate actions of historicals: announcing a segment on noticing a load queue
    // for the destination and unannouncing from the source server when noticing a drop request
    sourceLoadQueueChildrenCache.getListenable().addListener((CuratorFramework curatorFramework, PathChildrenCacheEvent event) -> {
        if (event.getType().equals(PathChildrenCacheEvent.Type.INITIALIZED)) {
            srcCountdown.countDown();
        } else if (CuratorUtils.isChildAdded(event)) {
            // Simulate source server dropping segment
            unannounceSegmentFromBatchForServer(source, segmentToMove, sourceSegKeys.get(2), zkPathsConfig);
        }
    });
    destinationLoadQueueChildrenCache.getListenable().addListener((CuratorFramework curatorFramework, PathChildrenCacheEvent event) -> {
        if (event.getType().equals(PathChildrenCacheEvent.Type.INITIALIZED)) {
            destCountdown.countDown();
        } else if (CuratorUtils.isChildAdded(event)) {
            // Simulate destination server loading segment
            announceBatchSegmentsForServer(dest, ImmutableSet.of(segmentToMove), zkPathsConfig, jsonMapper);
        }
    });
    sourceLoadQueueChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
    destinationLoadQueueChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
    Assert.assertTrue(timing.forWaiting().awaitLatch(srcCountdown));
    Assert.assertTrue(timing.forWaiting().awaitLatch(destCountdown));
    loadManagementPeons.put("localhost:1", sourceLoadQueuePeon);
    loadManagementPeons.put("localhost:2", destinationLoadQueuePeon);
    segmentRemovedLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(1);
    ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(sourceSegments.get(2));
    EasyMock.replay(druidDataSource);
    EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString())).andReturn(druidDataSource);
    EasyMock.expect(coordinatorRuntimeParams.getDataSourcesSnapshot()).andReturn(dataSourcesSnapshot).anyTimes();
    EasyMock.replay(segmentsMetadataManager, coordinatorRuntimeParams);
    EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
    EasyMock.replay(dataSourcesSnapshot);
    coordinator.moveSegment(coordinatorRuntimeParams, source.toImmutableDruidServer(), dest.toImmutableDruidServer(), sourceSegments.get(2), null);
    // wait for destination server to load segment
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // remove load queue key from destination server to trigger adding drop to load queue
    curator.delete().guaranteed().forPath(ZKPaths.makePath(DESTINATION_LOAD_PATH, segmentToMove.getId().toString()));
    // wait for drop
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    // clean up drop from load queue
    curator.delete().guaranteed().forPath(ZKPaths.makePath(SOURCE_LOAD_PATH, segmentToMove.getId().toString()));
    List<DruidServer> servers = new ArrayList<>(serverView.getInventory());
    Assert.assertEquals(2, servers.get(0).getTotalSegments());
    Assert.assertEquals(2, servers.get(1).getTotalSegments());
}
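Both load-queue caches are started with POST_INITIALIZED_EVENT, and the test blocks on a latch until the INITIALIZED event arrives so that only genuine child additions trigger the simulated historicals. A minimal sketch of that start-then-wait pattern, with a placeholder client, path, and timeout:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;

public final class WaitForInitSketch {

    public static PathChildrenCache startAndWait(CuratorFramework client, String path) throws Exception {
        CountDownLatch initialized = new CountDownLatch(1);
        PathChildrenCache cache = new PathChildrenCache(client, path, true);
        cache.getListenable().addListener((CuratorFramework c, PathChildrenCacheEvent event) -> {
            if (event.getType() == PathChildrenCacheEvent.Type.INITIALIZED) {
                initialized.countDown();   // existing children have all been delivered
            }
        });
        // POST_INITIALIZED_EVENT replays the current children as CHILD_ADDED events
        // and then fires a single INITIALIZED event
        cache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
        if (!initialized.await(30, TimeUnit.SECONDS)) {
            throw new IllegalStateException("cache for " + path + " did not initialize in time");
        }
        return cache;
    }
}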
use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by apache.
the class DruidCoordinatorTest method createCountDownLatchAndSetPathChildrenCacheListenerWithLatch.
private CountDownLatch createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(int latchCount, PathChildrenCache pathChildrenCache, Map<String, DataSegment> segments, DruidServer server) {
    final CountDownLatch countDownLatch = new CountDownLatch(latchCount);
    pathChildrenCache.getListenable().addListener((CuratorFramework client, PathChildrenCacheEvent event) -> {
        if (CuratorUtils.isChildAdded(event)) {
            DataSegment segment = findSegmentRelatedToCuratorEvent(segments, event);
            if (segment != null && server.getSegment(segment.getId()) == null) {
                if (countDownLatch.getCount() > 0) {
                    server.addDataSegment(segment);
                    curator.delete().guaranteed().forPath(event.getData().getPath());
                    countDownLatch.countDown();
                } else {
                    Assert.fail("The segment path " + event.getData().getPath() + " is not expected");
                }
            }
        }
    });
    return countDownLatch;
}
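findSegmentRelatedToCuratorEvent presumably maps the load-queue node back to a known segment. A hypothetical sketch of such a lookup, assuming the node name is the key of the segments map (the real druid helper may differ):

import java.util.Map;

import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.utils.ZKPaths;
import org.apache.druid.timeline.DataSegment;

public final class SegmentLookupSketch {

    // hypothetical shape only; the key format of the segments map is an assumption
    static DataSegment findSegmentRelatedToCuratorEvent(Map<String, DataSegment> segments, PathChildrenCacheEvent event) {
        String nodeName = ZKPaths.getNodeFromPath(event.getData().getPath());
        return segments.get(nodeName);
    }
}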