Example 6 with PathChildrenCacheEvent

use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project exhibitor by soabase.

the class TestZookeeperConfigProvider method testConcurrentModification.

@Test
public void testConcurrentModification() throws Exception {
    ZookeeperConfigProvider config1 = new ZookeeperConfigProvider(client, "/foo", new Properties(), "foo");
    ZookeeperConfigProvider config2 = new ZookeeperConfigProvider(client, "/foo", new Properties(), "foo");
    try {
        config1.start();
        config2.start();
        // released whenever config2's PathChildrenCache observes a change under /foo
        final Semaphore cacheUpdate2 = new Semaphore(0);
        config2.getPathChildrenCache().getListenable().addListener(new PathChildrenCacheListener() {

            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                cacheUpdate2.release();
            }
        });
        Properties properties = new Properties();
        properties.setProperty(PropertyBasedInstanceConfig.toName(StringConfigs.ZOO_CFG_EXTRA, PropertyBasedInstanceConfig.ROOT_PROPERTY_PREFIX), "1,2,3");
        // config1 writes a new config; wait until config2's cache has seen the update
        LoadedInstanceConfig loaded1 = config1.storeConfig(new PropertyBasedInstanceConfig(properties, new Properties()), -1);
        Assert.assertTrue(timing.acquireSemaphore(cacheUpdate2));
        timing.sleepABit();
        LoadedInstanceConfig loaded2 = config2.loadConfig();
        Assert.assertEquals("1,2,3", loaded2.getConfig().getRootConfig().getString(StringConfigs.ZOO_CFG_EXTRA));
        properties.setProperty(PropertyBasedInstanceConfig.toName(StringConfigs.ZOO_CFG_EXTRA, PropertyBasedInstanceConfig.ROOT_PROPERTY_PREFIX), "4,5,6");
        // config2 stores with the version it just loaded, which succeeds and advances the version
        config2.storeConfig(new PropertyBasedInstanceConfig(properties, new Properties()), loaded2.getVersion());
        // config1's version is now stale, so its concurrent store is rejected (returns null)
        Assert.assertNull(config1.storeConfig(new PropertyBasedInstanceConfig(properties, new Properties()), loaded1.getVersion()));
        // reloading reflects the newer version written by config2
        LoadedInstanceConfig newLoaded1 = config1.loadConfig();
        Assert.assertNotEquals(loaded1.getVersion(), newLoaded1.getVersion());
    } finally {
        CloseableUtils.closeQuietly(config2);
        CloseableUtils.closeQuietly(config1);
    }
}
Also used : CuratorFramework(org.apache.curator.framework.CuratorFramework) PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) PropertyBasedInstanceConfig(com.netflix.exhibitor.core.config.PropertyBasedInstanceConfig) Semaphore(java.util.concurrent.Semaphore) Properties(java.util.Properties) LoadedInstanceConfig(com.netflix.exhibitor.core.config.LoadedInstanceConfig) Test(org.testng.annotations.Test)
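The snippets on this page all follow the same basic wiring: build a PathChildrenCache for a parent znode, register a PathChildrenCacheListener on its listenable, and branch on the event type inside childEvent. A minimal self-contained sketch of that wiring, assuming an already-started CuratorFramework named client and a placeholder path /examples (neither comes from the projects quoted here):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;

public class ChildWatchSketch {

    // Registers a listener that prints every child event under /examples.
    // The caller owns the returned cache and must close it when done.
    public static PathChildrenCache watch(CuratorFramework client) throws Exception {
        final PathChildrenCache cache = new PathChildrenCache(client, "/examples", true);
        cache.getListenable().addListener(new PathChildrenCacheListener() {

            @Override
            public void childEvent(CuratorFramework c, PathChildrenCacheEvent event) {
                switch (event.getType()) {
                    case CHILD_ADDED:
                    case CHILD_UPDATED:
                    case CHILD_REMOVED:
                        // only CHILD_* events carry ChildData
                        System.out.println(event.getType() + " " + event.getData().getPath());
                        break;
                    default:
                        // connection events (CONNECTION_LOST, CONNECTION_RECONNECTED, INITIALIZED, ...)
                        System.out.println(event.getType());
                }
            }
        });
        cache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
        return cache;
    }
}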

Example 7 with PathChildrenCacheEvent

use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project drill by apache.

the class EventDispatcher method childEvent.

@Override
public void childEvent(final CuratorFramework client, final PathChildrenCacheEvent event) throws Exception {
    final PathChildrenCacheEvent.Type original = event.getType();
    final TransientStoreEventType mapped = MAPPINGS.get(original);
    if (mapped != null) {
        // dispatch the event to listeners only if it can be mapped
        final String path = event.getData().getPath();
        final byte[] bytes = event.getData().getData();
        final V value = store.getConfig().getSerializer().deserialize(bytes);
        store.fireListeners(TransientStoreEvent.of(mapped, path, value));
    }
}
Also used : PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) TransientStoreEventType(org.apache.drill.exec.coord.store.TransientStoreEventType)
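The MAPPINGS table consulted above is declared elsewhere in EventDispatcher and is not shown in this snippet. A plausible reconstruction of it, assuming the Drill-side event type exposes CREATE, UPDATE and DELETE constants (an assumption for illustration, not copied from the Drill source):

import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.drill.exec.coord.store.TransientStoreEventType;

final class EventTypeMappings {

    // Hypothetical shape of the mapping: only CHILD_* events have a Drill-side equivalent,
    // so childEvent() above skips anything whose lookup returns null.
    static final Map<PathChildrenCacheEvent.Type, TransientStoreEventType> MAPPINGS =
            ImmutableMap.of(
                    PathChildrenCacheEvent.Type.CHILD_ADDED, TransientStoreEventType.CREATE,
                    PathChildrenCacheEvent.Type.CHILD_UPDATED, TransientStoreEventType.UPDATE,
                    PathChildrenCacheEvent.Type.CHILD_REMOVED, TransientStoreEventType.DELETE);

    private EventTypeMappings() {
    }
}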

Example 8 with PathChildrenCacheEvent

use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by druid-io.

the class Announcer method announce.

/**
   * Announces the provided bytes at the given path.  Announcement means that it will create an ephemeral node
   * and monitor it to make sure that it always exists until it is unannounced or this object is closed.
   *
   * @param path                  The path to announce at
   * @param bytes                 The payload to announce
   * @param removeParentIfCreated remove parent of "path" if we had created that parent
   */
public void announce(String path, byte[] bytes, boolean removeParentIfCreated) {
    synchronized (toAnnounce) {
        if (!started) {
            toAnnounce.add(new Announceable(path, bytes, removeParentIfCreated));
            return;
        }
    }
    final ZKPaths.PathAndNode pathAndNode = ZKPaths.getPathAndNode(path);
    final String parentPath = pathAndNode.getPath();
    boolean buildParentPath = false;
    ConcurrentMap<String, byte[]> subPaths = announcements.get(parentPath);
    if (subPaths == null) {
        try {
            if (curator.checkExists().forPath(parentPath) == null) {
                buildParentPath = true;
            }
        } catch (Exception e) {
            log.debug(e, "Problem checking if the parent existed, ignoring.");
        }
        // I don't have a watcher on this path yet, create a Map and start watching.
        announcements.putIfAbsent(parentPath, new MapMaker().<String, byte[]>makeMap());
        // Guaranteed to be non-null, but might be a map put in there by another thread.
        final ConcurrentMap<String, byte[]> finalSubPaths = announcements.get(parentPath);
        // Synchronize to make sure that I only create a listener once.
        synchronized (finalSubPaths) {
            if (!listeners.containsKey(parentPath)) {
                final PathChildrenCache cache = factory.make(curator, parentPath);
                cache.getListenable().addListener(new PathChildrenCacheListener() {

                    private final AtomicReference<Set<String>> pathsLost = new AtomicReference<Set<String>>(null);

                    @Override
                    public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                        log.debug("Path[%s] got event[%s]", parentPath, event);
                        switch(event.getType()) {
                            case CHILD_REMOVED:
                                final ChildData child = event.getData();
                                final ZKPaths.PathAndNode childPath = ZKPaths.getPathAndNode(child.getPath());
                                final byte[] value = finalSubPaths.get(childPath.getNode());
                                if (value != null) {
                                    log.info("Node[%s] dropped, reinstating.", child.getPath());
                                    createAnnouncement(child.getPath(), value);
                                }
                                break;
                            case CONNECTION_LOST:
                                // Lost connection, which means session is broken, take inventory of what has been seen.
                                // This is to protect from a race condition in which the ephemeral node could have been
                                // created but not actually seen by the PathChildrenCache, which means that it won't know
                                // that it disappeared and thus will not generate a CHILD_REMOVED event for us.  Under normal
                                // circumstances, this can only happen upon connection loss; but technically if you have
                                // an adversary in the system, they could also delete the ephemeral node before the cache sees
                                // it.  This does not protect from that case, so don't have adversaries.
                                Set<String> pathsToReinstate = Sets.newHashSet();
                                for (String node : finalSubPaths.keySet()) {
                                    String path = ZKPaths.makePath(parentPath, node);
                                    log.info("Node[%s] is added to reinstate.", path);
                                    pathsToReinstate.add(path);
                                }
                                if (!pathsToReinstate.isEmpty() && !pathsLost.compareAndSet(null, pathsToReinstate)) {
                                    log.info("Already had a pathsLost set!?[%s]", parentPath);
                                }
                                break;
                            case CONNECTION_RECONNECTED:
                                final Set<String> thePathsLost = pathsLost.getAndSet(null);
                                if (thePathsLost != null) {
                                    for (String path : thePathsLost) {
                                        log.info("Reinstating [%s]", path);
                                        final ZKPaths.PathAndNode split = ZKPaths.getPathAndNode(path);
                                        createAnnouncement(path, announcements.get(split.getPath()).get(split.getNode()));
                                    }
                                }
                                break;
                        }
                    }
                });
                synchronized (toAnnounce) {
                    if (started) {
                        if (buildParentPath) {
                            createPath(parentPath, removeParentIfCreated);
                        }
                        startCache(cache);
                        listeners.put(parentPath, cache);
                    }
                }
            }
        }
        subPaths = finalSubPaths;
    }
    boolean created = false;
    synchronized (toAnnounce) {
        if (started) {
            byte[] oldBytes = subPaths.putIfAbsent(pathAndNode.getNode(), bytes);
            if (oldBytes == null) {
                created = true;
            } else if (!Arrays.equals(oldBytes, bytes)) {
                throw new IAE("Cannot reannounce different values under the same path");
            }
        }
    }
    if (created) {
        try {
            createAnnouncement(path, bytes);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }
}
Also used : Set(java.util.Set) PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) MapMaker(com.google.common.collect.MapMaker) AtomicReference(java.util.concurrent.atomic.AtomicReference) IAE(io.druid.java.util.common.IAE) KeeperException(org.apache.zookeeper.KeeperException) CuratorFramework(org.apache.curator.framework.CuratorFramework) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) ChildData(org.apache.curator.framework.recipes.cache.ChildData) ZKPaths(org.apache.curator.utils.ZKPaths)
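The javadoc above gives the contract of announce(): the payload lives at an ephemeral node that the Announcer recreates whenever it disappears, until the path is unannounced or the Announcer is closed. A rough usage sketch, assuming an Announcer named announcer has already been constructed and started elsewhere; the path and payload are placeholders:

// Sketch only: "announcer" is assumed to be a started io.druid.curator.announcement.Announcer.
byte[] payload = "{\"host\":\"example:8083\"}".getBytes(java.nio.charset.StandardCharsets.UTF_8);

// Creates the ephemeral node and keeps re-creating it if it is dropped;
// "true" asks the Announcer to remove the parent on close if it had to create it.
announcer.announce("/announcements/example:8083", payload, true);

// Later, when the service goes away (counterpart implied by the javadoc's "unannounced"):
announcer.unannounce("/announcements/example:8083");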

Example 9 with PathChildrenCacheEvent

use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by druid-io.

the class RemoteTaskRunner method addWorker.

/**
   * When a new worker appears, listeners are registered for status changes associated with tasks assigned to
   * the worker. Status changes indicate the creation or completion of a task.
   * The RemoteTaskRunner updates state according to these changes.
   *
   * @param worker contains metadata for a worker that has appeared in ZK
   *
   * @return future that will contain a fully initialized worker
   */
private ListenableFuture<ZkWorker> addWorker(final Worker worker) {
    log.info("Worker[%s] reportin' for duty!", worker.getHost());
    try {
        cancelWorkerCleanup(worker.getHost());
        final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), worker.getHost());
        final PathChildrenCache statusCache = workerStatusPathChildrenCacheFactory.make(cf, workerStatusPath);
        final SettableFuture<ZkWorker> retVal = SettableFuture.create();
        final ZkWorker zkWorker = new ZkWorker(worker, statusCache, jsonMapper);
        // Add status listener to the watcher for status changes
        zkWorker.addListener(new PathChildrenCacheListener() {

            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                final String taskId;
                final RemoteTaskRunnerWorkItem taskRunnerWorkItem;
                synchronized (statusLock) {
                    try {
                        switch(event.getType()) {
                            case CHILD_ADDED:
                            case CHILD_UPDATED:
                                taskId = ZKPaths.getNodeFromPath(event.getData().getPath());
                                final TaskAnnouncement announcement = jsonMapper.readValue(event.getData().getData(), TaskAnnouncement.class);
                                log.info("Worker[%s] wrote %s status for task [%s] on [%s]", zkWorker.getWorker().getHost(), announcement.getTaskStatus().getStatusCode(), taskId, announcement.getTaskLocation());
                                // Synchronizing state with ZK
                                statusLock.notifyAll();
                                final RemoteTaskRunnerWorkItem tmp;
                                if ((tmp = runningTasks.get(taskId)) != null) {
                                    taskRunnerWorkItem = tmp;
                                } else {
                                    final RemoteTaskRunnerWorkItem newTaskRunnerWorkItem = new RemoteTaskRunnerWorkItem(taskId, zkWorker.getWorker(), TaskLocation.unknown());
                                    final RemoteTaskRunnerWorkItem existingItem = runningTasks.putIfAbsent(taskId, newTaskRunnerWorkItem);
                                    if (existingItem == null) {
                                        log.warn("Worker[%s] announced a status for a task I didn't know about, adding to runningTasks: %s", zkWorker.getWorker().getHost(), taskId);
                                        taskRunnerWorkItem = newTaskRunnerWorkItem;
                                    } else {
                                        taskRunnerWorkItem = existingItem;
                                    }
                                }
                                if (!announcement.getTaskLocation().equals(taskRunnerWorkItem.getLocation())) {
                                    taskRunnerWorkItem.setLocation(announcement.getTaskLocation());
                                    TaskRunnerUtils.notifyLocationChanged(listeners, taskId, announcement.getTaskLocation());
                                }
                                if (announcement.getTaskStatus().isComplete()) {
                                    taskComplete(taskRunnerWorkItem, zkWorker, announcement.getTaskStatus());
                                    runPendingTasks();
                                }
                                break;
                            case CHILD_REMOVED:
                                taskId = ZKPaths.getNodeFromPath(event.getData().getPath());
                                taskRunnerWorkItem = runningTasks.remove(taskId);
                                if (taskRunnerWorkItem != null) {
                                    log.info("Task[%s] just disappeared!", taskId);
                                    taskRunnerWorkItem.setResult(TaskStatus.failure(taskId));
                                    TaskRunnerUtils.notifyStatusChanged(listeners, taskId, TaskStatus.failure(taskId));
                                } else {
                                    log.info("Task[%s] went bye bye.", taskId);
                                }
                                break;
                            case INITIALIZED:
                                if (zkWorkers.putIfAbsent(worker.getHost(), zkWorker) == null) {
                                    retVal.set(zkWorker);
                                } else {
                                    final String message = String.format("WTF?! Tried to add already-existing worker[%s]", worker.getHost());
                                    log.makeAlert(message).addData("workerHost", worker.getHost()).addData("workerIp", worker.getIp()).emit();
                                    retVal.setException(new IllegalStateException(message));
                                }
                                runPendingTasks();
                        }
                    } catch (Exception e) {
                        log.makeAlert(e, "Failed to handle new worker status").addData("worker", zkWorker.getWorker().getHost()).addData("znode", event.getData().getPath()).emit();
                    }
                }
            }
        });
        zkWorker.start();
        return retVal;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used : CuratorFramework(org.apache.curator.framework.CuratorFramework) PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) TaskAnnouncement(io.druid.indexing.worker.TaskAnnouncement) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) KeeperException(org.apache.zookeeper.KeeperException) MalformedURLException(java.net.MalformedURLException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
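addWorker() hands back a ListenableFuture that completes once the worker's status cache fires INITIALIZED and the ZkWorker is registered. A hedged sketch of how a caller might consume that future with Guava; the callback bodies are illustrative and not taken from RemoteTaskRunner:

// Illustrative consumer of the future returned by addWorker(); not from the Druid source.
final ListenableFuture<ZkWorker> workerFuture = addWorker(worker);
Futures.addCallback(workerFuture, new FutureCallback<ZkWorker>() {

    @Override
    public void onSuccess(ZkWorker zkWorker) {
        // the worker is fully initialized and present in zkWorkers at this point
        log.info("Worker[%s] is ready for task assignment", zkWorker.getWorker().getHost());
    }

    @Override
    public void onFailure(Throwable t) {
        log.warn(t, "Worker[%s] failed to initialize", worker.getHost());
    }
}, MoreExecutors.directExecutor()); // executor choice depends on the Guava version in use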

Example 10 with PathChildrenCacheEvent

use of org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent in project druid by druid-io.

the class LoadQueuePeonTest method testMultipleLoadDropSegments.

@Test
public void testMultipleLoadDropSegments() throws Exception {
    final AtomicInteger requestSignalIdx = new AtomicInteger(0);
    final AtomicInteger segmentSignalIdx = new AtomicInteger(0);
    loadQueuePeon = new LoadQueuePeon(curator, LOAD_QUEUE_PATH, jsonMapper, Execs.scheduledSingleThreaded("test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("test_load_queue_peon-%d"), new TestDruidCoordinatorConfig(null, null, null, null, null, null, 10, null, false, false, Duration.ZERO));
    loadQueuePeon.start();
    final CountDownLatch[] loadRequestSignal = new CountDownLatch[5];
    final CountDownLatch[] dropRequestSignal = new CountDownLatch[5];
    final CountDownLatch[] segmentLoadedSignal = new CountDownLatch[5];
    final CountDownLatch[] segmentDroppedSignal = new CountDownLatch[5];
    for (int i = 0; i < 5; ++i) {
        loadRequestSignal[i] = new CountDownLatch(1);
        dropRequestSignal[i] = new CountDownLatch(1);
        segmentLoadedSignal[i] = new CountDownLatch(1);
        segmentDroppedSignal[i] = new CountDownLatch(1);
    }
    final DataSegmentChangeHandler handler = new DataSegmentChangeHandler() {

        @Override
        public void addSegment(DataSegment segment, DataSegmentChangeCallback callback) {
            loadRequestSignal[requestSignalIdx.get()].countDown();
        }

        @Override
        public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback) {
            dropRequestSignal[requestSignalIdx.get()].countDown();
        }
    };
    final List<DataSegment> segmentToDrop = Lists.transform(ImmutableList.<String>of("2014-10-26T00:00:00Z/P1D", "2014-10-25T00:00:00Z/P1D", "2014-10-24T00:00:00Z/P1D", "2014-10-23T00:00:00Z/P1D", "2014-10-22T00:00:00Z/P1D"), new Function<String, DataSegment>() {

        @Override
        public DataSegment apply(String intervalStr) {
            return dataSegmentWithInterval(intervalStr);
        }
    });
    final List<DataSegment> segmentToLoad = Lists.transform(ImmutableList.<String>of("2014-10-27T00:00:00Z/P1D", "2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D"), new Function<String, DataSegment>() {

        @Override
        public DataSegment apply(String intervalStr) {
            return dataSegmentWithInterval(intervalStr);
        }
    });
    // segment with latest interval should be loaded first
    final List<DataSegment> expectedLoadOrder = Lists.transform(ImmutableList.<String>of("2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D", "2014-10-27T00:00:00Z/P1D"), new Function<String, DataSegment>() {

        @Override
        public DataSegment apply(String intervalStr) {
            return dataSegmentWithInterval(intervalStr);
        }
    });
    loadQueueCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
            if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
                DataSegmentChangeRequest request = jsonMapper.readValue(event.getData().getData(), DataSegmentChangeRequest.class);
                request.go(handler, null);
            }
        }
    });
    loadQueueCache.start();
    for (DataSegment segment : segmentToDrop) {
        loadQueuePeon.dropSegment(segment, new LoadPeonCallback() {

            @Override
            public void execute() {
                segmentDroppedSignal[segmentSignalIdx.get()].countDown();
            }
        });
    }
    for (DataSegment segment : segmentToLoad) {
        loadQueuePeon.loadSegment(segment, new LoadPeonCallback() {

            @Override
            public void execute() {
                segmentLoadedSignal[segmentSignalIdx.get()].countDown();
            }
        });
    }
    Assert.assertEquals(6000, loadQueuePeon.getLoadQueueSize());
    Assert.assertEquals(5, loadQueuePeon.getSegmentsToLoad().size());
    Assert.assertEquals(5, loadQueuePeon.getSegmentsToDrop().size());
    for (DataSegment segment : segmentToDrop) {
        String dropRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getIdentifier());
        Assert.assertTrue(timing.forWaiting().awaitLatch(dropRequestSignal[requestSignalIdx.get()]));
        Assert.assertNotNull(curator.checkExists().forPath(dropRequestPath));
        Assert.assertEquals(segment, ((SegmentChangeRequestDrop) jsonMapper.readValue(curator.getData().decompressed().forPath(dropRequestPath), DataSegmentChangeRequest.class)).getSegment());
        if (requestSignalIdx.get() == 4) {
            requestSignalIdx.set(0);
        } else {
            requestSignalIdx.incrementAndGet();
        }
        // simulate completion of drop request by historical
        curator.delete().guaranteed().forPath(dropRequestPath);
        Assert.assertTrue(timing.forWaiting().awaitLatch(segmentDroppedSignal[segmentSignalIdx.get()]));
        int expectedNumSegmentToDrop = 5 - segmentSignalIdx.get() - 1;
        Assert.assertEquals(expectedNumSegmentToDrop, loadQueuePeon.getSegmentsToDrop().size());
        if (segmentSignalIdx.get() == 4) {
            segmentSignalIdx.set(0);
        } else {
            segmentSignalIdx.incrementAndGet();
        }
    }
    for (DataSegment segment : expectedLoadOrder) {
        String loadRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getIdentifier());
        Assert.assertTrue(timing.forWaiting().awaitLatch(loadRequestSignal[requestSignalIdx.get()]));
        Assert.assertNotNull(curator.checkExists().forPath(loadRequestPath));
        Assert.assertEquals(segment, ((SegmentChangeRequestLoad) jsonMapper.readValue(curator.getData().decompressed().forPath(loadRequestPath), DataSegmentChangeRequest.class)).getSegment());
        requestSignalIdx.incrementAndGet();
        // simulate completion of load request by historical
        curator.delete().guaranteed().forPath(loadRequestPath);
        Assert.assertTrue(timing.forWaiting().awaitLatch(segmentLoadedSignal[segmentSignalIdx.get()]));
        int expectedNumSegmentToLoad = 5 - segmentSignalIdx.get() - 1;
        Assert.assertEquals(1200 * expectedNumSegmentToLoad, loadQueuePeon.getLoadQueueSize());
        Assert.assertEquals(expectedNumSegmentToLoad, loadQueuePeon.getSegmentsToLoad().size());
        segmentSignalIdx.incrementAndGet();
    }
}
Also used : PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) DataSegmentChangeRequest(io.druid.server.coordination.DataSegmentChangeRequest) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegmentChangeHandler(io.druid.server.coordination.DataSegmentChangeHandler) DataSegment(io.druid.timeline.DataSegment) CuratorFramework(org.apache.curator.framework.CuratorFramework) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DataSegmentChangeCallback(io.druid.server.coordination.DataSegmentChangeCallback) Test(org.junit.Test)

Aggregations

PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent): 13
CuratorFramework (org.apache.curator.framework.CuratorFramework): 11
PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener): 11
IOException (java.io.IOException): 6
PathChildrenCache (org.apache.curator.framework.recipes.cache.PathChildrenCache): 5
KeeperException (org.apache.zookeeper.KeeperException): 4
Test (org.junit.Test): 4
DataSegment (io.druid.timeline.DataSegment): 3
CountDownLatch (java.util.concurrent.CountDownLatch): 3
ExecutionException (java.util.concurrent.ExecutionException): 3
ChildData (org.apache.curator.framework.recipes.cache.ChildData): 3
LifecycleStart (io.druid.java.util.common.lifecycle.LifecycleStart): 2
MalformedURLException (java.net.MalformedURLException): 2
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
MapMaker (com.google.common.collect.MapMaker): 1
FutureCallback (com.google.common.util.concurrent.FutureCallback): 1