Search in sources:

Example 1 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

The class SQLMetadataSegmentManager, method start():

@LifecycleStart
public void start() {
    synchronized (lock) {
        // Idempotent: a second start() while already running is a no-op.
        if (started) {
            return;
        }
        // Polling task: swallow and alert on any failure so a single bad poll
        // cannot cancel the periodic schedule.
        final Runnable pollTask = new Runnable() {

            @Override
            public void run() {
                try {
                    poll();
                } catch (Exception e) {
                    log.makeAlert(e, "uncaught exception in segment manager polling thread").emit();
                }
            }
        };
        final Duration pollDelay = config.get().getPollDuration().toStandardDuration();
        exec = MoreExecutors.listeningDecorator(Execs.scheduledSingleThreaded("DatabaseSegmentManager-Exec--%d"));
        // First poll runs immediately; subsequent polls wait pollDelay after
        // the previous one finishes.
        future = exec.scheduleWithFixedDelay(pollTask, 0, pollDelay.getMillis(), TimeUnit.MILLISECONDS);
        started = true;
    }
}
Also used : Duration(org.joda.time.Duration) SQLException(java.sql.SQLException) IOException(java.io.IOException) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)

Example 2 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

The class FileRequestLogger, method start():

@LifecycleStart
public void start() {
    try {
        // Best-effort creation of the log directory; the return value is not
        // checked because getFileWriter() below will fail if it is missing.
        baseDir.mkdirs();
        // Truncate "now" to midnight to establish the current log day.
        MutableDateTime mutableDateTime = new DateTime().toMutableDateTime();
        mutableDateTime.setMillisOfDay(0);
        synchronized (lock) {
            currentDay = mutableDateTime.toDateTime();
            fileWriter = getFileWriter();
        }
        // Schedule the daily log roll at the next midnight, repeating every day.
        long nextDay = currentDay.plusDays(1).getMillis();
        Duration delay = new Duration(nextDay - new DateTime().getMillis());
        ScheduledExecutors.scheduleWithFixedDelay(exec, delay, Duration.standardDays(1), new Callable<ScheduledExecutors.Signal>() {

            @Override
            public ScheduledExecutors.Signal call() {
                try {
                    synchronized (lock) {
                        // Roll to a new day's file; close the old writer quietly
                        // so a close failure cannot abort the roll-over.
                        currentDay = currentDay.plusDays(1);
                        CloseQuietly.close(fileWriter);
                        fileWriter = getFileWriter();
                    }
                } catch (Exception e) {
                    // "throw" makes it explicit that propagate() never returns
                    // normally (it always rethrows), per the Guava idiom.
                    throw Throwables.propagate(e);
                }
                return ScheduledExecutors.Signal.REPEAT;
            }
        });
    } catch (IOException e) {
        // Rethrow as unchecked: start() cannot recover from a failure to open
        // the initial log file.
        throw Throwables.propagate(e);
    }
}
Also used : MutableDateTime(org.joda.time.MutableDateTime) Duration(org.joda.time.Duration) IOException(java.io.IOException) DateTime(org.joda.time.DateTime) MutableDateTime(org.joda.time.MutableDateTime) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)

Example 3 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

The class LookupCoordinatorManager, method start():

@LifecycleStart
public void start() {
    // Guards start/stop transitions; makes start() idempotent.
    synchronized (startStopSync) {
        if (started) {
            return;
        }
        // The executor is never recreated, so once it has been shut down this
        // manager cannot be restarted.
        if (executorService.isShutdown()) {
            throw new ISE("Cannot restart after stop!");
        }
        // Watch the lookup config: tier -> lookup name -> lookup spec.
        lookupMapConfigRef = configManager.watch(LOOKUP_CONFIG_KEY, new TypeReference<Map<String, Map<String, Map<String, Object>>>>() {
        }, null);
        // Local alias of the field so the callback below sees the exact future
        // scheduled here, even if the field is later reassigned.
        final ListenableScheduledFuture backgroundManagerFuture = this.backgroundManagerFuture = executorService.scheduleWithFixedDelay(new Runnable() {

            @Override
            public void run() {
                final Map<String, Map<String, Map<String, Object>>> allLookupTiers = lookupMapConfigRef.get();
                // Sanity check for if we are shutting down
                if (Thread.currentThread().isInterrupted()) {
                    LOG.info("Not updating lookups because process was interrupted");
                    return;
                }
                if (!started) {
                    LOG.info("Not started. Returning");
                    return;
                }
                if (allLookupTiers == null) {
                    LOG.info("Not updating lookups because no data exists");
                    return;
                }
                // Push updates (and deletes of vanished lookups) tier by tier.
                for (final String tier : allLookupTiers.keySet()) {
                    try {
                        final Map<String, Map<String, Object>> allLookups = allLookupTiers.get(tier);
                        final Map<String, Map<String, Object>> oldLookups = prior_update.get(tier);
                        final Collection<String> drops;
                        if (oldLookups == null) {
                            drops = ImmutableList.of();
                        } else {
                            // Lookups present in the prior snapshot but absent now.
                            drops = Sets.difference(oldLookups.keySet(), allLookups.keySet());
                        }
                        // Reference equality: the watched config has not changed
                        // since the last pass, so only push lookups that are new
                        // on the tier's nodes.
                        if (allLookupTiers == prior_update) {
                            LOG.debug("No updates");
                            updateAllNewOnTier(tier, allLookups);
                        } else {
                            updateAllOnTier(tier, allLookups);
                            deleteAllOnTier(tier, drops);
                        }
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag before propagating.
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    } catch (Exception e) {
                        // Per-tier failures are logged and retried on the next pass.
                        LOG.error(e, "Error updating lookups for tier [%s]. Will try again soon", tier);
                    }
                }
                prior_update = allLookupTiers;
            }
        }, 0, lookupCoordinatorManagerConfig.getPeriod(), TimeUnit.MILLISECONDS);
        // Completion callback: counts down the exit latch either way so stop()
        // can wait for the background task to terminate.
        Futures.addCallback(backgroundManagerFuture, new FutureCallback<Object>() {

            @Override
            public void onSuccess(@Nullable Object result) {
                backgroundManagerExitedLatch.countDown();
                LOG.debug("Exited background lookup manager");
            }

            @Override
            public void onFailure(Throwable t) {
                backgroundManagerExitedLatch.countDown();
                // Cancellation is the normal shutdown path; anything else is an
                // unexpected failure worth alerting on.
                if (backgroundManagerFuture.isCancelled()) {
                    LOG.info("Background lookup manager exited");
                    LOG.trace(t, "Background lookup manager exited with throwable");
                } else {
                    LOG.makeAlert(t, "Background lookup manager exited with error!").emit();
                }
            }
        });
        started = true;
        LOG.debug("Started");
    }
}
Also used : TimeoutException(java.util.concurrent.TimeoutException) MalformedURLException(java.net.MalformedURLException) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ListenableScheduledFuture(com.google.common.util.concurrent.ListenableScheduledFuture) ISE(io.druid.java.util.common.ISE) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)

Example 4 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

The class CoordinatorRuleManager, method start():

@LifecycleStart
public void start() {
    synchronized (lock) {
        // Already running: nothing to do.
        if (started) {
            return;
        }
        // Periodic poll of coordinator rules.
        final Runnable pollTask = new Runnable() {

            @Override
            public void run() {
                poll();
            }
        };
        this.exec = Execs.scheduledSingleThreaded("CoordinatorRuleManager-Exec--%d");
        // Start polling immediately, then repeat at the configured period.
        ScheduledExecutors.scheduleWithFixedDelay(exec, new Duration(0), config.get().getPollPeriod().toStandardDuration(), pollTask);
        started = true;
    }
}
Also used : Duration(org.joda.time.Duration) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)

Example 5 with LifecycleStart

Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

The class DruidSchema, method start():

@LifecycleStart
public void start() {
    // Background refresh loop: blocks until there is work (dataSources queued
    // for refresh) and the server view is initialized, then recomputes table
    // metadata for each changed dataSource.
    cacheExec.submit(new Runnable() {

        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    final Set<String> dataSources = Sets.newHashSet();
                    try {
                        synchronized (lock) {
                            // Earliest time a periodic refresh is due, based on the
                            // last refresh and the configured refresh period.
                            final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                            // Wait until: the server view is initialized, at least one
                            // dataSource needs refreshing, and either an immediate
                            // refresh was requested or the refresh period has elapsed.
                            while (!(isServerViewInitialized && !dataSourcesNeedingRefresh.isEmpty() && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                                lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                            }
                            // Snapshot and clear the pending set while still holding
                            // the lock, so concurrent callbacks queue into a fresh set.
                            dataSources.addAll(dataSourcesNeedingRefresh);
                            dataSourcesNeedingRefresh.clear();
                            lastRefresh = System.currentTimeMillis();
                            refreshImmediately = false;
                        }
                        // Refresh dataSources.
                        for (final String dataSource : dataSources) {
                            log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                            final long startTime = System.currentTimeMillis();
                            final DruidTable druidTable = computeTable(dataSource);
                            if (druidTable == null) {
                                // No table could be computed: drop the dataSource.
                                if (tables.remove(dataSource) != null) {
                                    log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                                }
                            } else {
                                tables.put(dataSource, druidTable);
                                log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource, System.currentTimeMillis() - startTime);
                            }
                        }
                        // Signal that at least one full refresh pass has completed.
                        initializationLatch.countDown();
                    } catch (InterruptedException e) {
                        // Fall through.
                        throw e;
                    } catch (Exception e) {
                        log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                        synchronized (lock) {
                            // Add dataSources back to the refresh list.
                            dataSourcesNeedingRefresh.addAll(dataSources);
                            lock.notifyAll();
                        }
                    }
                }
            } catch (InterruptedException e) {
            // Just exit.
            } catch (Throwable e) {
                // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
                // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
                log.makeAlert(e, "Metadata refresh failed permanently").emit();
                throw e;
            } finally {
                log.info("Metadata refresh stopped.");
            }
        }
    });
    // Segment callbacks: run inline on the server-view thread; each one queues
    // work for the refresh loop above and wakes it via notifyAll().
    serverView.registerSegmentCallback(MoreExecutors.sameThreadExecutor(), new ServerView.SegmentCallback() {

        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
            synchronized (lock) {
                // Unblocks the refresh loop's initial wait.
                isServerViewInitialized = true;
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                // A previously unseen dataSource should appear promptly, so
                // request a refresh without waiting for the periodic deadline.
                if (!tables.containsKey(segment.getDataSource())) {
                    refreshImmediately = true;
                }
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }
    });
    // When a server disappears, queue every dataSource it served for refresh.
    serverView.registerServerCallback(MoreExecutors.sameThreadExecutor(), new ServerView.ServerCallback() {

        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
            final List<String> dataSourceNames = Lists.newArrayList();
            for (DruidDataSource druidDataSource : server.getDataSources()) {
                dataSourceNames.add(druidDataSource.getName());
            }
            synchronized (lock) {
                dataSourcesNeedingRefresh.addAll(dataSourceNames);
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }
    });
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) DruidTable(io.druid.sql.calcite.table.DruidTable) DruidServer(io.druid.client.DruidServer) DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) DataSegment(io.druid.timeline.DataSegment) DruidDataSource(io.druid.client.DruidDataSource) DateTime(org.joda.time.DateTime) ServerView(io.druid.client.ServerView) TimelineServerView(io.druid.client.TimelineServerView) List(java.util.List) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)

Aggregations

LifecycleStart (io.druid.java.util.common.lifecycle.LifecycleStart)12 IOException (java.io.IOException)7 Duration (org.joda.time.Duration)4 ISE (io.druid.java.util.common.ISE)3 ExecutionException (java.util.concurrent.ExecutionException)3 DateTime (org.joda.time.DateTime)3 MalformedURLException (java.net.MalformedURLException)2 List (java.util.List)2 Map (java.util.Map)2 CuratorFramework (org.apache.curator.framework.CuratorFramework)2 PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent)2 PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener)2 JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException)1 TypeReference (com.fasterxml.jackson.core.type.TypeReference)1 Function (com.google.common.base.Function)1 ImmutableList (com.google.common.collect.ImmutableList)1 ImmutableMap (com.google.common.collect.ImmutableMap)1 FutureCallback (com.google.common.util.concurrent.FutureCallback)1 ListenableScheduledFuture (com.google.common.util.concurrent.ListenableScheduledFuture)1 DruidDataSource (io.druid.client.DruidDataSource)1