Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.
The start() method of the SQLMetadataSegmentManager class:
@LifecycleStart
public void start() {
  synchronized (lock) {
    if (started) {
      return;
    }
    exec = MoreExecutors.listeningDecorator(Execs.scheduledSingleThreaded("DatabaseSegmentManager-Exec--%d"));
    final Duration delay = config.get().getPollDuration().toStandardDuration();
    // Poll the metadata store on a fixed delay; alert on unexpected errors
    // instead of letting the polling thread die.
    future = exec.scheduleWithFixedDelay(
        new Runnable() {
          @Override
          public void run() {
            try {
              poll();
            }
            catch (Exception e) {
              log.makeAlert(e, "uncaught exception in segment manager polling thread").emit();
            }
          }
        },
        0,
        delay.getMillis(),
        TimeUnit.MILLISECONDS
    );
    started = true;
  }
}
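For context, methods annotated with @LifecycleStart are invoked when the owning Lifecycle object starts, and @LifecycleStop methods when it stops. Below is a minimal, self-contained sketch of that pattern, assuming the io.druid.java.util.common.lifecycle.Lifecycle API; the PollingService class is hypothetical and only illustrates the guarded-start idiom used above.

import io.druid.java.util.common.lifecycle.Lifecycle;
import io.druid.java.util.common.lifecycle.LifecycleStart;
import io.druid.java.util.common.lifecycle.LifecycleStop;

public class LifecycleStartExample {
  // Hypothetical managed object illustrating the started-flag idiom.
  public static class PollingService {
    private final Object lock = new Object();
    private boolean started = false;

    @LifecycleStart
    public void start() {
      synchronized (lock) {
        if (started) {
          return; // idempotent, as in SQLMetadataSegmentManager
        }
        // ... create executors, schedule polling, etc. ...
        started = true;
      }
    }

    @LifecycleStop
    public void stop() {
      synchronized (lock) {
        // ... cancel futures, shut down executors, etc. ...
        started = false;
      }
    }
  }

  public static void main(String[] args) throws Exception {
    final Lifecycle lifecycle = new Lifecycle();
    lifecycle.addManagedInstance(new PollingService());
    lifecycle.start(); // invokes the @LifecycleStart methods
    lifecycle.stop();  // invokes the @LifecycleStop methods
  }
}

The started flag makes start() safe to call more than once, which matters because lifecycle hooks may be wired up from several injection points.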
Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.
The start() method of the FileRequestLogger class:
@LifecycleStart
public void start() {
  try {
    baseDir.mkdirs();
    MutableDateTime mutableDateTime = new DateTime().toMutableDateTime();
    mutableDateTime.setMillisOfDay(0);
    synchronized (lock) {
      currentDay = mutableDateTime.toDateTime();
      fileWriter = getFileWriter();
    }
    long nextDay = currentDay.plusDays(1).getMillis();
    Duration delay = new Duration(nextDay - new DateTime().getMillis());
    // Roll the log file at the next midnight, and every 24 hours after that.
    ScheduledExecutors.scheduleWithFixedDelay(
        exec,
        delay,
        Duration.standardDays(1),
        new Callable<ScheduledExecutors.Signal>() {
          @Override
          public ScheduledExecutors.Signal call() {
            try {
              synchronized (lock) {
                currentDay = currentDay.plusDays(1);
                CloseQuietly.close(fileWriter);
                fileWriter = getFileWriter();
              }
            }
            catch (Exception e) {
              throw Throwables.propagate(e);
            }
            return ScheduledExecutors.Signal.REPEAT;
          }
        }
    );
  }
  catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
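The day-rollover math above is worth isolating: the logger truncates "now" to midnight, then schedules the first rotation for the next midnight and every 24 hours thereafter. Here is a runnable sketch of just that delay computation, assuming only Joda-Time on the classpath:

import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.joda.time.MutableDateTime;

public class RolloverDelay {
  public static void main(String[] args) {
    // Truncate the current time to the start of the day, as FileRequestLogger does.
    MutableDateTime mutableDateTime = new DateTime().toMutableDateTime();
    mutableDateTime.setMillisOfDay(0);
    DateTime currentDay = mutableDateTime.toDateTime();

    // Delay until the next midnight; the logger then rolls the file once per day.
    long nextDay = currentDay.plusDays(1).getMillis();
    Duration delay = new Duration(nextDay - new DateTime().getMillis());
    System.out.println("millis until rollover: " + delay.getMillis());
  }
}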
Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.
The start() method of the LookupCoordinatorManager class:
@LifecycleStart
public void start() {
  synchronized (startStopSync) {
    if (started) {
      return;
    }
    if (executorService.isShutdown()) {
      throw new ISE("Cannot restart after stop!");
    }
    lookupMapConfigRef = configManager.watch(
        LOOKUP_CONFIG_KEY,
        new TypeReference<Map<String, Map<String, Map<String, Object>>>>() {
        },
        null
    );
    final ListenableScheduledFuture<?> backgroundManagerFuture = this.backgroundManagerFuture = executorService.scheduleWithFixedDelay(
        new Runnable() {
          @Override
          public void run() {
            final Map<String, Map<String, Map<String, Object>>> allLookupTiers = lookupMapConfigRef.get();
            // Sanity check for if we are shutting down
            if (Thread.currentThread().isInterrupted()) {
              LOG.info("Not updating lookups because process was interrupted");
              return;
            }
            if (!started) {
              LOG.info("Not started. Returning");
              return;
            }
            if (allLookupTiers == null) {
              LOG.info("Not updating lookups because no data exists");
              return;
            }
            for (final String tier : allLookupTiers.keySet()) {
              try {
                final Map<String, Map<String, Object>> allLookups = allLookupTiers.get(tier);
                final Map<String, Map<String, Object>> oldLookups = prior_update.get(tier);
                final Collection<String> drops;
                if (oldLookups == null) {
                  drops = ImmutableList.of();
                } else {
                  drops = Sets.difference(oldLookups.keySet(), allLookups.keySet());
                }
                if (allLookupTiers == prior_update) {
                  LOG.debug("No updates");
                  updateAllNewOnTier(tier, allLookups);
                } else {
                  updateAllOnTier(tier, allLookups);
                  deleteAllOnTier(tier, drops);
                }
              }
              catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw Throwables.propagate(e);
              }
              catch (Exception e) {
                LOG.error(e, "Error updating lookups for tier [%s]. Will try again soon", tier);
              }
            }
            prior_update = allLookupTiers;
          }
        },
        0,
        lookupCoordinatorManagerConfig.getPeriod(),
        TimeUnit.MILLISECONDS
    );
    Futures.addCallback(
        backgroundManagerFuture,
        new FutureCallback<Object>() {
          @Override
          public void onSuccess(@Nullable Object result) {
            backgroundManagerExitedLatch.countDown();
            LOG.debug("Exited background lookup manager");
          }

          @Override
          public void onFailure(Throwable t) {
            backgroundManagerExitedLatch.countDown();
            if (backgroundManagerFuture.isCancelled()) {
              LOG.info("Background lookup manager exited");
              LOG.trace(t, "Background lookup manager exited with throwable");
            } else {
              LOG.makeAlert(t, "Background lookup manager exited with error!").emit();
            }
          }
        }
    );
    started = true;
    LOG.debug("Started");
  }
}
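The interplay between the started flag and executorService.isShutdown() implies a one-way lifecycle: once stopped, the manager refuses to restart. A minimal, self-contained sketch of that guard using plain JDK executors; the StartStopGuard class and its stop() method are hypothetical illustrations, not the actual LookupCoordinatorManager code:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class StartStopGuard {
  private final Object startStopSync = new Object();
  private volatile boolean started = false;
  private final ScheduledExecutorService executorService =
      Executors.newSingleThreadScheduledExecutor();

  public void start() {
    synchronized (startStopSync) {
      if (started) {
        return; // idempotent start
      }
      if (executorService.isShutdown()) {
        // Mirrors the ISE thrown above: a stopped manager cannot be restarted.
        throw new IllegalStateException("Cannot restart after stop!");
      }
      // ... schedule background work here ...
      started = true;
    }
  }

  public void stop() {
    synchronized (startStopSync) {
      if (!started) {
        return;
      }
      started = false;
      executorService.shutdownNow(); // permanently retires the executor
    }
  }
}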
Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.
The start() method of the CoordinatorRuleManager class:
@LifecycleStart
public void start() {
  synchronized (lock) {
    if (started) {
      return;
    }
    this.exec = Execs.scheduledSingleThreaded("CoordinatorRuleManager-Exec--%d");
    // Poll the coordinator rules immediately, then again after every poll period.
    ScheduledExecutors.scheduleWithFixedDelay(
        exec,
        new Duration(0),
        config.get().getPollPeriod().toStandardDuration(),
        new Runnable() {
          @Override
          public void run() {
            poll();
          }
        }
    );
    started = true;
  }
}
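ScheduledExecutors.scheduleWithFixedDelay here runs poll() immediately (zero initial delay) and then waits the configured period between the end of one run and the start of the next. The JDK equivalent, as a runnable sketch (the one-second period is an arbitrary stand-in for config.get().getPollPeriod()):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FixedDelayPolling {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();

    // Fixed delay: the gap is measured from the end of one poll to the
    // start of the next, so slow polls do not pile up.
    exec.scheduleWithFixedDelay(() -> System.out.println("poll()"), 0, 1, TimeUnit.SECONDS);

    TimeUnit.SECONDS.sleep(3);
    exec.shutdownNow();
  }
}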
Use of io.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.
The start() method of the DruidSchema class:
@LifecycleStart
public void start() {
  cacheExec.submit(
      new Runnable() {
        @Override
        public void run() {
          try {
            while (!Thread.currentThread().isInterrupted()) {
              final Set<String> dataSources = Sets.newHashSet();
              try {
                synchronized (lock) {
                  final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                  while (!(isServerViewInitialized && !dataSourcesNeedingRefresh.isEmpty() && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                    lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                  }
                  dataSources.addAll(dataSourcesNeedingRefresh);
                  dataSourcesNeedingRefresh.clear();
                  lastRefresh = System.currentTimeMillis();
                  refreshImmediately = false;
                }
                // Refresh dataSources.
                for (final String dataSource : dataSources) {
                  log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                  final long startTime = System.currentTimeMillis();
                  final DruidTable druidTable = computeTable(dataSource);
                  if (druidTable == null) {
                    if (tables.remove(dataSource) != null) {
                      log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                    }
                  } else {
                    tables.put(dataSource, druidTable);
                    log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource, System.currentTimeMillis() - startTime);
                  }
                }
                initializationLatch.countDown();
              }
              catch (InterruptedException e) {
                // Fall through.
                throw e;
              }
              catch (Exception e) {
                log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                synchronized (lock) {
                  // Add dataSources back to the refresh list.
                  dataSourcesNeedingRefresh.addAll(dataSources);
                  lock.notifyAll();
                }
              }
            }
          }
          catch (InterruptedException e) {
            // Just exit.
          }
          catch (Throwable e) {
            // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
            // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
            log.makeAlert(e, "Metadata refresh failed permanently").emit();
            throw e;
          }
          finally {
            log.info("Metadata refresh stopped.");
          }
        }
      }
  );
  serverView.registerSegmentCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.SegmentCallback() {
        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
          synchronized (lock) {
            isServerViewInitialized = true;
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            if (!tables.containsKey(segment.getDataSource())) {
              refreshImmediately = true;
            }
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
  serverView.registerServerCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.ServerCallback() {
        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
          final List<String> dataSourceNames = Lists.newArrayList();
          for (DruidDataSource druidDataSource : server.getDataSources()) {
            dataSourceNames.add(druidDataSource.getName());
          }
          synchronized (lock) {
            dataSourcesNeedingRefresh.addAll(dataSourceNames);
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
}
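The heart of this method is the wait/notify loop: the refresh thread sleeps on lock until a callback requests an immediate refresh or the periodic deadline passes. A condensed, self-contained sketch of that coordination; the field names echo DruidSchema, but the values (such as the 60-second period) are hypothetical stand-ins for its config:

public class RefreshLoop {
  private final Object lock = new Object();
  private boolean refreshImmediately = false;
  private long lastRefresh = 0;
  private final long refreshPeriodMillis = 60_000;

  // Condensed form of the DruidSchema wait loop: block on the lock until either
  // an immediate refresh is requested or the periodic deadline has passed.
  void awaitNextRefresh() throws InterruptedException {
    synchronized (lock) {
      final long nextRefresh = lastRefresh + refreshPeriodMillis;
      while (!refreshImmediately && nextRefresh > System.currentTimeMillis()) {
        lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
      }
      lastRefresh = System.currentTimeMillis();
      refreshImmediately = false;
    }
  }

  // Callbacks (such as segmentAdded above) wake the loop like this:
  void requestImmediateRefresh() {
    synchronized (lock) {
      refreshImmediately = true;
      lock.notifyAll();
    }
  }
}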