Example 11 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

From the start() method of the ZkCoordinator class.

@LifecycleStart
public void start() throws IOException {
    synchronized (lock) {
        if (started) {
            return;
        }
        log.info("Starting zkCoordinator for server[%s]", me.getName());
        final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName());
        final String servedSegmentsLocation = ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName());
        final String liveSegmentsLocation = ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName());
        loadQueueCache = new PathChildrenCache(curator, loadQueueLocation, true, true, Execs.multiThreaded(config.getNumLoadingThreads(), "ZkCoordinator-%s"));
        try {
            curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient());
            curator.newNamespaceAwareEnsurePath(servedSegmentsLocation).ensure(curator.getZookeeperClient());
            curator.newNamespaceAwareEnsurePath(liveSegmentsLocation).ensure(curator.getZookeeperClient());
            loadLocalCache();
            loadQueueCache.getListenable().addListener(new PathChildrenCacheListener() {

                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    final ChildData child = event.getData();
                    switch(event.getType()) {
                        case CHILD_ADDED:
                            final String path = child.getPath();
                            final DataSegmentChangeRequest request = jsonMapper.readValue(child.getData(), DataSegmentChangeRequest.class);
                            log.info("New request[%s] with zNode[%s].", request.asString(), path);
                            try {
                                request.go(getDataSegmentChangeHandler(), new DataSegmentChangeCallback() {

                                    boolean hasRun = false;

                                    @Override
                                    public void execute() {
                                        try {
                                            if (!hasRun) {
                                                curator.delete().guaranteed().forPath(path);
                                                log.info("Completed request [%s]", request.asString());
                                                hasRun = true;
                                            }
                                        } catch (Exception e) {
                                            try {
                                                curator.delete().guaranteed().forPath(path);
                                            } catch (Exception e1) {
                                                log.error(e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                                            }
                                            log.error(e, "Exception while removing zNode[%s]", path);
                                            throw Throwables.propagate(e);
                                        }
                                    }
                                });
                            } catch (Exception e) {
                                try {
                                    curator.delete().guaranteed().forPath(path);
                                } catch (Exception e1) {
                                    log.error(e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                                }
                                log.makeAlert(e, "Segment load/unload: uncaught exception.").addData("node", path).addData("nodeProperties", request).emit();
                            }
                            break;
                        case CHILD_REMOVED:
                            log.info("zNode[%s] was removed", event.getData().getPath());
                            break;
                        default:
                            log.info("Ignoring event[%s]", event);
                    }
                }
            });
            loadQueueCache.start();
        } catch (Exception e) {
            Throwables.propagateIfPossible(e, IOException.class);
            throw Throwables.propagate(e);
        }
        started = true;
    }
}
Also used : CuratorFramework (org.apache.curator.framework.CuratorFramework), PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener), PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent), PathChildrenCache (org.apache.curator.framework.recipes.cache.PathChildrenCache), ChildData (org.apache.curator.framework.recipes.cache.ChildData), IOException (java.io.IOException), SegmentLoadingException (io.druid.segment.loading.SegmentLoadingException), ExecutionException (java.util.concurrent.ExecutionException), LifecycleStart (io.druid.java.util.common.lifecycle.LifecycleStart)
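
ZkCoordinator's start() is mostly Curator wiring: ensure the relevant znode paths exist, attach a PathChildrenCacheListener, and start a PathChildrenCache over the server's load-queue path. Stripped of the Druid-specific segment handling, that pattern looks roughly like the sketch below; the connection string, znode path, and LoadQueueWatcher class are hypothetical placeholders for illustration, not Druid code.

import java.nio.charset.StandardCharsets;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class LoadQueueWatcher {

    public static void main(String[] args) throws Exception {
        // Hypothetical connection string and path, for illustration only.
        CuratorFramework curator = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3));
        curator.start();

        final String loadQueuePath = "/druid/loadQueue/example-server";

        // Make sure the watched path exists before caching it, mirroring the
        // newNamespaceAwareEnsurePath(...).ensure(...) calls in the example above.
        if (curator.checkExists().forPath(loadQueuePath) == null) {
            curator.create().creatingParentsIfNeeded().forPath(loadQueuePath);
        }

        // Cache child znodes and their payloads (the boolean enables data caching).
        final PathChildrenCache cache = new PathChildrenCache(curator, loadQueuePath, true);
        cache.getListenable().addListener(new PathChildrenCacheListener() {

            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                switch(event.getType()) {
                    case CHILD_ADDED:
                        // ZkCoordinator deserializes this payload into a DataSegmentChangeRequest; here we just print it.
                        System.out.println("Added " + event.getData().getPath() + " -> " + new String(event.getData().getData(), StandardCharsets.UTF_8));
                        break;
                    case CHILD_REMOVED:
                        System.out.println("Removed " + event.getData().getPath());
                        break;
                    default:
                        // Connection and update events are ignored in this sketch.
                        break;
                }
            }
        });
        cache.start();

        // Watch for a minute, then clean up.
        Thread.sleep(60_000);
        cache.close();
        curator.close();
    }
}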

Example 12 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

From the start() method of the TieredBrokerHostSelector class.

@LifecycleStart
public void start() {
    synchronized (lock) {
        if (started) {
            return;
        }
        try {
            for (Map.Entry<String, String> entry : tierConfig.getTierToBrokerMap().entrySet()) {
                ServerDiscoverySelector selector = serverDiscoveryFactory.createSelector(entry.getValue());
                selector.start();
                selectorMap.put(entry.getValue(), selector);
            }
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        started = true;
    }
}
Also used : ServerDiscoverySelector (io.druid.curator.discovery.ServerDiscoverySelector), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), Map (java.util.Map), LifecycleStart (io.druid.java.util.common.lifecycle.LifecycleStart)
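
In both examples the annotated start() is never called directly; Druid's Lifecycle invokes it once the object is registered as a managed instance. Below is a minimal sketch of that wiring, assuming the Lifecycle class in the same io.druid.java.util.common.lifecycle package exposes addManagedInstance(), start(), and stop() as in typical Druid usage; MyManagedService is a hypothetical stand-in for ZkCoordinator or TieredBrokerHostSelector, not code from the Druid repository.

import io.druid.java.util.common.lifecycle.Lifecycle;
import io.druid.java.util.common.lifecycle.LifecycleStart;
import io.druid.java.util.common.lifecycle.LifecycleStop;

public class LifecycleWiringSketch {

    // Hypothetical component following the same idempotent-start idiom as the examples above.
    public static class MyManagedService {

        private final Object lock = new Object();
        private boolean started = false;

        @LifecycleStart
        public void start() {
            synchronized (lock) {
                if (started) {
                    // A second start() is a no-op, just like in ZkCoordinator and TieredBrokerHostSelector.
                    return;
                }
                // ... acquire resources: caches, selectors, connections ...
                started = true;
            }
        }

        @LifecycleStop
        public void stop() {
            synchronized (lock) {
                if (!started) {
                    return;
                }
                // ... release whatever start() acquired ...
                started = false;
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Lifecycle lifecycle = new Lifecycle();
        MyManagedService service = new MyManagedService();

        // Registration lets the Lifecycle scan the instance for @LifecycleStart/@LifecycleStop
        // methods and call them during lifecycle.start() and lifecycle.stop().
        lifecycle.addManagedInstance(service);

        // Invokes service.start() via the annotation.
        lifecycle.start();

        // ... application does its work ...

        // Invokes service.stop().
        lifecycle.stop();
    }
}

In the Druid server itself this registration usually happens through Guice scoping (the ManageLifecycle annotation) rather than explicit addManagedInstance() calls, but the effect is the same: lifecycle.start() runs every @LifecycleStart method on the managed instances.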

Aggregations

LifecycleStart (io.druid.java.util.common.lifecycle.LifecycleStart): 12
IOException (java.io.IOException): 7
Duration (org.joda.time.Duration): 4
ISE (io.druid.java.util.common.ISE): 3
ExecutionException (java.util.concurrent.ExecutionException): 3
DateTime (org.joda.time.DateTime): 3
MalformedURLException (java.net.MalformedURLException): 2
List (java.util.List): 2
Map (java.util.Map): 2
CuratorFramework (org.apache.curator.framework.CuratorFramework): 2
PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent): 2
PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener): 2
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 1
TypeReference (com.fasterxml.jackson.core.type.TypeReference): 1
Function (com.google.common.base.Function): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
FutureCallback (com.google.common.util.concurrent.FutureCallback): 1
ListenableScheduledFuture (com.google.common.util.concurrent.ListenableScheduledFuture): 1
DruidDataSource (io.druid.client.DruidDataSource): 1