Use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project BRFS by zhangnianli.
The class CacheExample, method main.
public static void main(String[] args) throws Exception {
    CuratorClient client = CuratorClient.getClientInstance("192.168.101.86:2181");
    final Set<String> set = new HashSet<String>();
    PathChildrenCache cache = new PathChildrenCache(client.getInnerClient(), PATH, true);
    cache.start();
    PathChildrenCacheListener listener1 = new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            switch (event.getType()) {
                case CHILD_ADDED: {
                    System.out.println("1Node added: " + ZKPaths.getNodeFromPath(event.getData().getPath()));
                    System.out.println("1Node added: " + event.getData().getPath());
                    break;
                }
                case CHILD_UPDATED: {
                    System.out.println("1Node changed: " + ZKPaths.getNodeFromPath(event.getData().getPath()));
                    System.out.println("1Node changed: " + new String(event.getData().getData()));
                    break;
                }
                case CHILD_REMOVED: {
                    System.out.println("1Node removed: " + ZKPaths.getNodeFromPath(event.getData().getPath()));
                    break;
                }
                default:
                    break;
            }
        }
    };
    ExecutorService services = Executors.newFixedThreadPool(5);
    cache.getListenable().addListener(listener1, services);
    Thread.sleep(Long.MAX_VALUE);
    cache.close();
    client.close();
}
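A note on shutdown: because Thread.sleep(Long.MAX_VALUE) blocks indefinitely, the two close() calls above are effectively unreachable. Below is a minimal sketch of a tidier variant, assuming the same CuratorClient wrapper, PATH constant, listener1, and services executor; PathChildrenCache implements Closeable, so try-with-resources covers cleanup:

// Hedged sketch, not the project's code: try-with-resources guarantees the
// cache is closed even if the wait is interrupted. BUILD_INITIAL_CACHE makes
// start() block until the initial child list has been fetched.
try (PathChildrenCache watchedCache = new PathChildrenCache(client.getInnerClient(), PATH, true)) {
    watchedCache.getListenable().addListener(listener1, services);
    watchedCache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
    new CountDownLatch(1).await(); // park until the JVM is interrupted
} finally {
    services.shutdown();
    client.close();
}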
Use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project chassis by Kixeye.
The class DynamicZookeeperConfigurationSource, method initializePathChildrenCache.
private void initializePathChildrenCache() {
    pathChildrenCache = new PathChildrenCache(curatorFramework, configPathRoot, true);
    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            LOGGER.debug("Got event {}", event);
            if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED && event.getData().getPath().equals(instanceConfigPath)) {
                // do stuff
                LOGGER.info("Detected creation of node {}. Initializing zookeeper configuration source...", instanceConfigPath);
                try {
                    initializeZookeeperConfigurationSource();
                } catch (BootstrapException e) {
LOGGER.error("Failed to initialized zookeeper configuration source for path " + instanceConfigPath, e);
                    throw e;
                }
                return;
            }
            if (event.getType() == PathChildrenCacheEvent.Type.CHILD_REMOVED && event.getData().getPath().equals(instanceConfigPath)) {
                if (running) {
                    LOGGER.info("Detected deletion of node {}, destroying zookeeper configuration source...", instanceConfigPath);
                    destroyZookeeperCofigurationSource();
                    return;
                }
                LOGGER.warn("Detected deletion of node {}, but zookeeper configuration source not currently running. This should not happen. Ignoring event...", instanceConfigPath);
                return;
            }
            LOGGER.debug("Ignoring event {}", event);
        }
    });
    try {
        pathChildrenCache.start(StartMode.BUILD_INITIAL_CACHE);
    } catch (Exception e) {
        throw new BootstrapException("Failed to initialize zookeeper configuration path cache " + configPathRoot, e);
    }
}
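StartMode.BUILD_INITIAL_CACHE makes start() block until the initial set of children has been fetched, and no CHILD_ADDED events are delivered for those pre-existing nodes, which is why the listener above only has to react to changes that happen afterwards. A minimal sketch of reading the primed cache right after startup, assuming the same fields as the snippet:

// Hedged sketch: after start(StartMode.BUILD_INITIAL_CACHE) returns, the
// cache is already populated, so getCurrentData() reflects the initial
// children without any CHILD_ADDED events having been fired for them.
pathChildrenCache.start(StartMode.BUILD_INITIAL_CACHE);
for (ChildData child : pathChildrenCache.getCurrentData()) {
    LOGGER.debug("Pre-existing config node: {}", child.getPath());
}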
Use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by druid-io.
The class ZkCoordinator, method start.
@LifecycleStart
public void start() throws IOException {
    synchronized (lock) {
        if (started) {
            return;
        }
        log.info("Starting zkCoordinator for server[%s]", me.getName());
        final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName());
        final String servedSegmentsLocation = ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName());
        final String liveSegmentsLocation = ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName());
        loadQueueCache = new PathChildrenCache(curator, loadQueueLocation, true, true, Execs.multiThreaded(config.getNumLoadingThreads(), "ZkCoordinator-%s"));
        try {
            curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient());
            curator.newNamespaceAwareEnsurePath(servedSegmentsLocation).ensure(curator.getZookeeperClient());
            curator.newNamespaceAwareEnsurePath(liveSegmentsLocation).ensure(curator.getZookeeperClient());
            loadLocalCache();
            loadQueueCache.getListenable().addListener(new PathChildrenCacheListener() {

                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    final ChildData child = event.getData();
                    switch (event.getType()) {
                        case CHILD_ADDED:
                            final String path = child.getPath();
                            final DataSegmentChangeRequest request = jsonMapper.readValue(child.getData(), DataSegmentChangeRequest.class);
                            log.info("New request[%s] with zNode[%s].", request.asString(), path);
                            try {
                                request.go(getDataSegmentChangeHandler(), new DataSegmentChangeCallback() {

                                    boolean hasRun = false;

                                    @Override
                                    public void execute() {
                                        try {
                                            if (!hasRun) {
                                                curator.delete().guaranteed().forPath(path);
                                                log.info("Completed request [%s]", request.asString());
                                                hasRun = true;
                                            }
                                        } catch (Exception e) {
                                            try {
                                                curator.delete().guaranteed().forPath(path);
                                            } catch (Exception e1) {
                                                log.error(e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                                            }
                                            log.error(e, "Exception while removing zNode[%s]", path);
                                            throw Throwables.propagate(e);
                                        }
                                    }
                                });
                            } catch (Exception e) {
                                try {
                                    curator.delete().guaranteed().forPath(path);
                                } catch (Exception e1) {
                                    log.error(e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                                }
                                log.makeAlert(e, "Segment load/unload: uncaught exception.").addData("node", path).addData("nodeProperties", request).emit();
                            }
                            break;
                        case CHILD_REMOVED:
                            log.info("zNode[%s] was removed", event.getData().getPath());
                            break;
                        default:
                            log.info("Ignoring event[%s]", event);
                    }
                }
            });
            loadQueueCache.start();
        } catch (Exception e) {
            Throwables.propagateIfPossible(e, IOException.class);
            throw Throwables.propagate(e);
        }
        started = true;
    }
}
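The delete-and-acknowledge handshake above leans on Curator's guaranteed deletes: delete().guaranteed() records the path so that, if the connection drops before the delete is acknowledged, Curator keeps retrying it in the background until it succeeds. A minimal sketch of that idiom in isolation, assuming a started CuratorFramework named curator; acknowledgeRequest is a hypothetical helper, not part of the Druid code:

// Hedged sketch of the acknowledgement used above: removing the request
// znode signals the coordinator that this node has processed the request.
// guaranteed() keeps retrying the delete in the background across
// connection loss, so the request cannot be left dangling.
private void acknowledgeRequest(CuratorFramework curator, String requestPath) {
    try {
        curator.delete().guaranteed().forPath(requestPath);
    } catch (KeeperException.NoNodeException ignored) {
        // a background retry already removed it; nothing to do
    } catch (Exception e) {
        throw new RuntimeException("Failed to delete " + requestPath, e);
    }
}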
Use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by druid-io.
The class RemoteTaskRunner, method addWorker.
/**
 * When a new worker appears, listeners are registered for status changes associated with tasks assigned to
 * the worker. Status changes indicate the creation or completion of a task.
 * The RemoteTaskRunner updates state according to these changes.
 *
 * @param worker contains metadata for a worker that has appeared in ZK
 *
 * @return future that will contain a fully initialized worker
 */
private ListenableFuture<ZkWorker> addWorker(final Worker worker) {
    log.info("Worker[%s] reportin' for duty!", worker.getHost());
    try {
        cancelWorkerCleanup(worker.getHost());
        final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), worker.getHost());
        final PathChildrenCache statusCache = workerStatusPathChildrenCacheFactory.make(cf, workerStatusPath);
        final SettableFuture<ZkWorker> retVal = SettableFuture.create();
        final ZkWorker zkWorker = new ZkWorker(worker, statusCache, jsonMapper);
        // Add status listener to the watcher for status changes
        zkWorker.addListener(new PathChildrenCacheListener() {

            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                final String taskId;
                final RemoteTaskRunnerWorkItem taskRunnerWorkItem;
                synchronized (statusLock) {
                    try {
                        switch (event.getType()) {
                            case CHILD_ADDED:
                            case CHILD_UPDATED:
                                taskId = ZKPaths.getNodeFromPath(event.getData().getPath());
                                final TaskAnnouncement announcement = jsonMapper.readValue(event.getData().getData(), TaskAnnouncement.class);
                                log.info("Worker[%s] wrote %s status for task [%s] on [%s]", zkWorker.getWorker().getHost(), announcement.getTaskStatus().getStatusCode(), taskId, announcement.getTaskLocation());
                                // Synchronizing state with ZK
                                statusLock.notifyAll();
                                final RemoteTaskRunnerWorkItem tmp;
                                if ((tmp = runningTasks.get(taskId)) != null) {
                                    taskRunnerWorkItem = tmp;
                                } else {
                                    final RemoteTaskRunnerWorkItem newTaskRunnerWorkItem = new RemoteTaskRunnerWorkItem(taskId, zkWorker.getWorker(), TaskLocation.unknown());
                                    final RemoteTaskRunnerWorkItem existingItem = runningTasks.putIfAbsent(taskId, newTaskRunnerWorkItem);
                                    if (existingItem == null) {
                                        log.warn("Worker[%s] announced a status for a task I didn't know about, adding to runningTasks: %s", zkWorker.getWorker().getHost(), taskId);
                                        taskRunnerWorkItem = newTaskRunnerWorkItem;
                                    } else {
                                        taskRunnerWorkItem = existingItem;
                                    }
                                }
                                if (!announcement.getTaskLocation().equals(taskRunnerWorkItem.getLocation())) {
                                    taskRunnerWorkItem.setLocation(announcement.getTaskLocation());
                                    TaskRunnerUtils.notifyLocationChanged(listeners, taskId, announcement.getTaskLocation());
                                }
                                if (announcement.getTaskStatus().isComplete()) {
                                    taskComplete(taskRunnerWorkItem, zkWorker, announcement.getTaskStatus());
                                    runPendingTasks();
                                }
                                break;
                            case CHILD_REMOVED:
                                taskId = ZKPaths.getNodeFromPath(event.getData().getPath());
                                taskRunnerWorkItem = runningTasks.remove(taskId);
                                if (taskRunnerWorkItem != null) {
                                    log.info("Task[%s] just disappeared!", taskId);
                                    taskRunnerWorkItem.setResult(TaskStatus.failure(taskId));
                                    TaskRunnerUtils.notifyStatusChanged(listeners, taskId, TaskStatus.failure(taskId));
                                } else {
                                    log.info("Task[%s] went bye bye.", taskId);
                                }
                                break;
                            case INITIALIZED:
                                if (zkWorkers.putIfAbsent(worker.getHost(), zkWorker) == null) {
                                    retVal.set(zkWorker);
                                } else {
                                    final String message = String.format("WTF?! Tried to add already-existing worker[%s]", worker.getHost());
                                    log.makeAlert(message).addData("workerHost", worker.getHost()).addData("workerIp", worker.getIp()).emit();
                                    retVal.setException(new IllegalStateException(message));
                                }
                                runPendingTasks();
                        }
                    } catch (Exception e) {
                        log.makeAlert(e, "Failed to handle new worker status").addData("worker", zkWorker.getWorker().getHost()).addData("znode", event.getData().getPath()).emit();
                    }
                }
            }
        });
        zkWorker.start();
        return retVal;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
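Note that retVal is only completed inside the INITIALIZED branch, so the returned future resolves once the worker's initial status snapshot has been replayed. PathChildrenCache only emits INITIALIZED when started with StartMode.POST_INITIALIZED_EVENT, which is presumably what workerStatusPathChildrenCacheFactory configures. A minimal sketch of that contract, assuming a plain CuratorFramework and a hypothetical status path:

// Hedged sketch: INITIALIZED arrives exactly once, after CHILD_ADDED events
// for every child that existed at startup, and only under
// POST_INITIALIZED_EVENT. That makes it a natural "initial sync done" hook.
PathChildrenCache statusCache = new PathChildrenCache(curator, "/indexer/status/worker-1", true);
statusCache.getListenable().addListener(new PathChildrenCacheListener() {

    @Override
    public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
        if (event.getType() == PathChildrenCacheEvent.Type.INITIALIZED) {
            System.out.println("initial worker status synced");
        }
    }
});
statusCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);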
Use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by druid-io.
The class LoadQueuePeonTest, method testMultipleLoadDropSegments.
@Test
public void testMultipleLoadDropSegments() throws Exception {
    final AtomicInteger requestSignalIdx = new AtomicInteger(0);
    final AtomicInteger segmentSignalIdx = new AtomicInteger(0);
    loadQueuePeon = new LoadQueuePeon(curator, LOAD_QUEUE_PATH, jsonMapper, Execs.scheduledSingleThreaded("test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("test_load_queue_peon-%d"), new TestDruidCoordinatorConfig(null, null, null, null, null, null, 10, null, false, false, Duration.ZERO));
    loadQueuePeon.start();
    final CountDownLatch[] loadRequestSignal = new CountDownLatch[5];
    final CountDownLatch[] dropRequestSignal = new CountDownLatch[5];
    final CountDownLatch[] segmentLoadedSignal = new CountDownLatch[5];
    final CountDownLatch[] segmentDroppedSignal = new CountDownLatch[5];
    for (int i = 0; i < 5; ++i) {
        loadRequestSignal[i] = new CountDownLatch(1);
        dropRequestSignal[i] = new CountDownLatch(1);
        segmentLoadedSignal[i] = new CountDownLatch(1);
        segmentDroppedSignal[i] = new CountDownLatch(1);
    }
    final DataSegmentChangeHandler handler = new DataSegmentChangeHandler() {

        @Override
        public void addSegment(DataSegment segment, DataSegmentChangeCallback callback) {
            loadRequestSignal[requestSignalIdx.get()].countDown();
        }

        @Override
        public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback) {
            dropRequestSignal[requestSignalIdx.get()].countDown();
        }
    };
    final List<DataSegment> segmentToDrop = Lists.transform(ImmutableList.<String>of("2014-10-26T00:00:00Z/P1D", "2014-10-25T00:00:00Z/P1D", "2014-10-24T00:00:00Z/P1D", "2014-10-23T00:00:00Z/P1D", "2014-10-22T00:00:00Z/P1D"), new Function<String, DataSegment>() {

        @Override
        public DataSegment apply(String intervalStr) {
            return dataSegmentWithInterval(intervalStr);
        }
    });
    final List<DataSegment> segmentToLoad = Lists.transform(ImmutableList.<String>of("2014-10-27T00:00:00Z/P1D", "2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D"), new Function<String, DataSegment>() {

        @Override
        public DataSegment apply(String intervalStr) {
            return dataSegmentWithInterval(intervalStr);
        }
    });
    // segment with latest interval should be loaded first
    final List<DataSegment> expectedLoadOrder = Lists.transform(ImmutableList.<String>of("2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D", "2014-10-27T00:00:00Z/P1D"), new Function<String, DataSegment>() {

        @Override
        public DataSegment apply(String intervalStr) {
            return dataSegmentWithInterval(intervalStr);
        }
    });
    loadQueueCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
                DataSegmentChangeRequest request = jsonMapper.readValue(event.getData().getData(), DataSegmentChangeRequest.class);
                request.go(handler, null);
            }
        }
    });
    loadQueueCache.start();
    for (DataSegment segment : segmentToDrop) {
        loadQueuePeon.dropSegment(segment, new LoadPeonCallback() {

            @Override
            public void execute() {
                segmentDroppedSignal[segmentSignalIdx.get()].countDown();
            }
        });
    }
    for (DataSegment segment : segmentToLoad) {
        loadQueuePeon.loadSegment(segment, new LoadPeonCallback() {

            @Override
            public void execute() {
                segmentLoadedSignal[segmentSignalIdx.get()].countDown();
            }
        });
    }
    Assert.assertEquals(6000, loadQueuePeon.getLoadQueueSize());
    Assert.assertEquals(5, loadQueuePeon.getSegmentsToLoad().size());
    Assert.assertEquals(5, loadQueuePeon.getSegmentsToDrop().size());
    for (DataSegment segment : segmentToDrop) {
        String dropRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getIdentifier());
        Assert.assertTrue(timing.forWaiting().awaitLatch(dropRequestSignal[requestSignalIdx.get()]));
        Assert.assertNotNull(curator.checkExists().forPath(dropRequestPath));
        Assert.assertEquals(segment, ((SegmentChangeRequestDrop) jsonMapper.readValue(curator.getData().decompressed().forPath(dropRequestPath), DataSegmentChangeRequest.class)).getSegment());
        if (requestSignalIdx.get() == 4) {
            requestSignalIdx.set(0);
        } else {
            requestSignalIdx.incrementAndGet();
        }
        // simulate completion of drop request by historical
        curator.delete().guaranteed().forPath(dropRequestPath);
        Assert.assertTrue(timing.forWaiting().awaitLatch(segmentDroppedSignal[segmentSignalIdx.get()]));
        int expectedNumSegmentToDrop = 5 - segmentSignalIdx.get() - 1;
        Assert.assertEquals(expectedNumSegmentToDrop, loadQueuePeon.getSegmentsToDrop().size());
        if (segmentSignalIdx.get() == 4) {
            segmentSignalIdx.set(0);
        } else {
            segmentSignalIdx.incrementAndGet();
        }
    }
    for (DataSegment segment : expectedLoadOrder) {
        String loadRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getIdentifier());
        Assert.assertTrue(timing.forWaiting().awaitLatch(loadRequestSignal[requestSignalIdx.get()]));
        Assert.assertNotNull(curator.checkExists().forPath(loadRequestPath));
        Assert.assertEquals(segment, ((SegmentChangeRequestLoad) jsonMapper.readValue(curator.getData().decompressed().forPath(loadRequestPath), DataSegmentChangeRequest.class)).getSegment());
        requestSignalIdx.incrementAndGet();
        // simulate completion of load request by historical
        curator.delete().guaranteed().forPath(loadRequestPath);
        Assert.assertTrue(timing.forWaiting().awaitLatch(segmentLoadedSignal[segmentSignalIdx.get()]));
        int expectedNumSegmentToLoad = 5 - segmentSignalIdx.get() - 1;
        Assert.assertEquals(1200 * expectedNumSegmentToLoad, loadQueuePeon.getLoadQueueSize());
        Assert.assertEquals(expectedNumSegmentToLoad, loadQueuePeon.getSegmentsToLoad().size());
        segmentSignalIdx.incrementAndGet();
    }
}
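One detail worth noticing: the assertions read each request znode back through curator.getData().decompressed(), which suggests the peon writes the JSON payloads with Curator's transparent compression. A minimal sketch of a matching write side, under that assumption and with a hypothetical changeRequest object:

// Hedged sketch of a write that would pair with getData().decompressed():
// compressed() makes Curator compress the payload transparently before
// it is stored in ZooKeeper.
byte[] payload = jsonMapper.writeValueAsBytes(changeRequest);
curator.create().compressed().forPath(ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getIdentifier()), payload);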