Example 6 with LifecycleStart

use of org.apache.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

the class HdfsStorageAuthentication method authenticate.

/**
 * Authenticates against a secured Hadoop cluster.
 * In case of any bug fix, make sure to fix the code in JobHelper#authenticate as well.
 */
@LifecycleStart
public void authenticate() {
    String principal = hdfsKerberosConfig.getPrincipal();
    String keytab = hdfsKerberosConfig.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials() || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    log.info("Trying to authenticate user [%s] with keytab [%s]...", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
            }
        }
    }
}
Also used : ISE(org.apache.druid.java.util.common.ISE) IOException(java.io.IOException) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart)
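
The @LifecycleStart annotation itself does nothing at runtime; it only marks the method for Druid's Lifecycle, which reflectively invokes annotated no-arg methods when the owning object is registered and the lifecycle starts. Below is a minimal, hypothetical sketch of that dispatch; StartInvoker is not a Druid class and only illustrates the mechanism.

import java.lang.reflect.Method;
import org.apache.druid.java.util.common.lifecycle.LifecycleStart;

// Hypothetical helper, not part of Druid: invokes every public no-arg method annotated
// with @LifecycleStart on a managed instance, roughly what Druid's Lifecycle does when
// an object is registered with it and the lifecycle is started.
public final class StartInvoker {

    private StartInvoker() {
    }

    public static void invokeStartMethods(Object instance) throws Exception {
        for (Method method : instance.getClass().getMethods()) {
            if (method.isAnnotationPresent(LifecycleStart.class) && method.getParameterCount() == 0) {
                // e.g. HdfsStorageAuthentication#authenticate above
                method.invoke(instance);
            }
        }
    }
}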

Example 7 with LifecycleStart

use of org.apache.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

the class ConfigManager method start.

@LifecycleStart
public void start() {
    synchronized (lock) {
        if (started) {
            return;
        }
        poller = new PollingCallable();
        ScheduledExecutors.scheduleWithFixedDelay(exec, new Duration(0), config.get().getPollDuration().toStandardDuration(), poller);
        started = true;
    }
}
Also used : Duration(org.joda.time.Duration) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart)
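
ConfigManager guards its start with a plain monitor and a started flag so that repeated calls are no-ops. The sketch below isolates that idempotent start/stop pattern; PollingService is an illustrative assumption, not Druid code, and the matching @LifecycleStop method releases under the same lock whatever start() created.

import org.apache.druid.java.util.common.lifecycle.LifecycleStart;
import org.apache.druid.java.util.common.lifecycle.LifecycleStop;

// Hypothetical service, not Druid code: illustrates the lock-and-flag idempotency
// pattern used by ConfigManager#start above.
public class PollingService {

    private final Object lock = new Object();
    private boolean started = false;

    @LifecycleStart
    public void start() {
        synchronized (lock) {
            if (started) {
                // A second start() call is a no-op.
                return;
            }
            // ... schedule the poller here, as ConfigManager does ...
            started = true;
        }
    }

    @LifecycleStop
    public void stop() {
        synchronized (lock) {
            if (!started) {
                return;
            }
            // ... cancel the poller here ...
            started = false;
        }
    }
}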

Example 8 with LifecycleStart

use of org.apache.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

the class LookupReferencesManager method start.

@LifecycleStart
public void start() throws IOException {
    if (!lifecycleLock.canStart()) {
        throw new ISE("can't start.");
    }
    try {
        LOG.debug("LookupExtractorFactoryContainerProvider starting.");
        if (!Strings.isNullOrEmpty(lookupConfig.getSnapshotWorkingDir())) {
            FileUtils.mkdirp(new File(lookupConfig.getSnapshotWorkingDir()));
        }
        loadAllLookupsAndInitStateRef();
        if (!testMode) {
            mainThread = Execs.makeThread("LookupExtractorFactoryContainerProvider-MainThread", () -> {
                try {
                    if (!lifecycleLock.awaitStarted()) {
                        LOG.error("Lifecycle not started, lookup update notices will not be handled.");
                        return;
                    }
                    while (!Thread.interrupted() && lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)) {
                        try {
                            handlePendingNotices();
                            LockSupport.parkNanos(LookupReferencesManager.this, TimeUnit.MINUTES.toNanos(1));
                        } catch (Throwable t) {
                            LOG.makeAlert(t, "Error occured while lookup notice handling.").emit();
                        }
                    }
                } catch (Throwable t) {
                    LOG.error(t, "Error while waiting for lifecycle start. lookup updates notices will not be handled");
                } finally {
                    LOG.info("Lookup Management loop exited. Lookup notices are not handled anymore.");
                }
            }, true);
            mainThread.start();
        }
        LOG.debug("LookupExtractorFactoryContainerProvider started.");
        lifecycleLock.started();
    } finally {
        lifecycleLock.exitStart();
    }
}
Also used : ISE(org.apache.druid.java.util.common.ISE) File(java.io.File) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart)
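
The main thread above parks for up to a minute and relies on other threads to unpark it when a new lookup notice is queued. The self-contained sketch below reduces that hand-off to plain JDK primitives; NoticeLoop, its queue, and its method names are assumptions for illustration, not the actual LookupReferencesManager internals.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

// Hypothetical sketch, not Druid code: a daemon loop that drains queued notices,
// parks for up to a minute, and is woken early by producers via unpark, mirroring
// the park/unpark hand-off in LookupReferencesManager.
public class NoticeLoop {

    private final Queue<Runnable> notices = new ConcurrentLinkedQueue<>();
    private final Thread worker = new Thread(this::run, "notice-loop");

    public void start() {
        worker.setDaemon(true);
        worker.start();
    }

    public void add(Runnable notice) {
        notices.add(notice);
        // Wake the loop before its one-minute park expires.
        LockSupport.unpark(worker);
    }

    private void run() {
        while (!Thread.interrupted()) {
            Runnable notice;
            while ((notice = notices.poll()) != null) {
                // Analogous to handlePendingNotices() above.
                notice.run();
            }
            LockSupport.parkNanos(this, TimeUnit.MINUTES.toNanos(1));
        }
    }
}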

Example 9 with LifecycleStart

use of org.apache.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

the class CoordinatorPollingBasicAuthorizerCacheManager method start.

@LifecycleStart
public void start() {
    if (!lifecycleLock.canStart()) {
        throw new ISE("can't start.");
    }
    LOG.info("Starting CoordinatorPollingBasicAuthorizerCacheManager.");
    try {
        initUserMaps();
        ScheduledExecutors.scheduleWithFixedDelay(exec, new Duration(commonCacheConfig.getPollingPeriod()), new Duration(commonCacheConfig.getPollingPeriod()), () -> {
            try {
                long randomDelay = ThreadLocalRandom.current().nextLong(0, commonCacheConfig.getMaxRandomDelay());
                LOG.debug("Inserting random polling delay of [%s] ms", randomDelay);
                Thread.sleep(randomDelay);
                LOG.debug("Scheduled userMap cache poll is running");
                for (String authorizerPrefix : authorizerPrefixes) {
                    UserAndRoleMap userAndRoleMap = fetchUserAndRoleMapFromCoordinator(authorizerPrefix, false);
                    if (userAndRoleMap != null) {
                        cachedUserMaps.put(authorizerPrefix, userAndRoleMap.getUserMap());
                        cachedRoleMaps.put(authorizerPrefix, userAndRoleMap.getRoleMap());
                    }
                }
                LOG.debug("Scheduled userMap cache poll is done");
            } catch (Throwable t) {
                LOG.makeAlert(t, "Error occured while polling for cachedUserMaps.").emit();
            }
        });
        ScheduledExecutors.scheduleWithFixedDelay(exec, new Duration(commonCacheConfig.getPollingPeriod()), new Duration(commonCacheConfig.getPollingPeriod()), () -> {
            try {
                long randomDelay = ThreadLocalRandom.current().nextLong(0, commonCacheConfig.getMaxRandomDelay());
                LOG.debug("Inserting random polling delay of [%s] ms", randomDelay);
                Thread.sleep(randomDelay);
                LOG.debug("Scheduled groupMappingMap cache poll is running");
                for (String authorizerPrefix : authorizerPrefixes) {
                    GroupMappingAndRoleMap groupMappingAndRoleMap = fetchGroupAndRoleMapFromCoordinator(authorizerPrefix, false);
                    if (groupMappingAndRoleMap != null) {
                        cachedGroupMappingMaps.put(authorizerPrefix, groupMappingAndRoleMap.getGroupMappingMap());
                        cachedGroupMappingRoleMaps.put(authorizerPrefix, groupMappingAndRoleMap.getRoleMap());
                    }
                }
                LOG.debug("Scheduled groupMappingMap cache poll is done");
            } catch (Throwable t) {
                LOG.makeAlert(t, "Error occured while polling for cachedGroupMappingMaps.").emit();
            }
        });
        lifecycleLock.started();
        LOG.info("Started CoordinatorPollingBasicAuthorizerCacheManager.");
    } finally {
        lifecycleLock.exitStart();
    }
}
Also used : UserAndRoleMap(org.apache.druid.security.basic.authorization.entity.UserAndRoleMap) GroupMappingAndRoleMap(org.apache.druid.security.basic.authorization.entity.GroupMappingAndRoleMap) ISE(org.apache.druid.java.util.common.ISE) Duration(org.joda.time.Duration) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart)
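
Both polls above sleep for a random amount of time before hitting the Coordinator, so that many nodes polling on the same period do not all arrive at once. The sketch below shows that jitter idea using only the JDK scheduler rather than Druid's ScheduledExecutors; JitteredPoller and its parameters are illustrative assumptions.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch, not Druid code: fixed-delay polling with a random per-iteration
// jitter so nodes sharing the same polling period spread their requests out.
public class JitteredPoller {

    private final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();

    // maxRandomDelayMs must be positive.
    public void start(long pollingPeriodMs, long maxRandomDelayMs, Runnable poll) {
        exec.scheduleWithFixedDelay(() -> {
            try {
                Thread.sleep(ThreadLocalRandom.current().nextLong(0, maxRandomDelayMs));
                poll.run();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } catch (RuntimeException e) {
                // Swallow and log in real code; a thrown exception would cancel the schedule.
            }
        }, pollingPeriodMs, pollingPeriodMs, TimeUnit.MILLISECONDS);
    }

    public void stop() {
        exec.shutdownNow();
    }
}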

Example 10 with LifecycleStart

use of org.apache.druid.java.util.common.lifecycle.LifecycleStart in project druid by druid-io.

the class RemoteTaskRunner method start.

@Override
@LifecycleStart
public void start() {
    if (!lifecycleLock.canStart()) {
        return;
    }
    try {
        log.info("Starting RemoteTaskRunner...");
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();
        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener((client, event) -> {
            final Worker worker;
            switch (event.getType()) {
                case CHILD_ADDED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    synchronized (waitingForMonitor) {
                        waitingFor.increment();
                    }
                    Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {

                        @Override
                        public void onSuccess(ZkWorker zkWorker) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }

                        @Override
                        public void onFailure(Throwable throwable) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }
                    });
                    break;
                case CHILD_UPDATED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    updateWorker(worker);
                    break;
                case CHILD_REMOVED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    removeWorker(worker);
                    break;
                case INITIALIZED:
                    // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
                    List<String> workers;
                    try {
                        workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
                    } catch (KeeperException.NoNodeException e) {
                        // statusPath doesn't exist yet; can occur if no middleManagers have started.
                        workers = ImmutableList.of();
                    }
                    for (String workerId : workers) {
                        final String workerAnnouncePath = JOINER.join(indexerZkConfig.getAnnouncementsPath(), workerId);
                        final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), workerId);
                        if (!zkWorkers.containsKey(workerId) && cf.checkExists().forPath(workerAnnouncePath) == null) {
                            try {
                                scheduleTasksCleanupForWorker(workerId, cf.getChildren().forPath(workerStatusPath));
                            } catch (Exception e) {
                                log.warn(e, "Could not schedule cleanup for worker[%s] during startup (maybe someone removed the status znode[%s]?). Skipping.", workerId, workerStatusPath);
                            }
                        }
                    }
                    synchronized (waitingForMonitor) {
                        waitingFor.decrement();
                        waitingForMonitor.notifyAll();
                    }
                    break;
                case CONNECTION_SUSPENDED:
                case CONNECTION_RECONNECTED:
                case CONNECTION_LOST:
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }
        ScheduledExecutors.scheduleAtFixedRate(cleanupExec, Period.ZERO.toStandardDuration(), config.getWorkerBlackListCleanupPeriod().toStandardDuration(), this::checkBlackListedNodes);
        provisioningService = provisioningStrategy.makeProvisioningService(this);
        lifecycleLock.started();
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        lifecycleLock.exitStart();
    }
}
Also used : TimeoutException(java.util.concurrent.TimeoutException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) MutableInt(org.apache.commons.lang.mutable.MutableInt) Worker(org.apache.druid.indexing.worker.Worker) KeeperException(org.apache.zookeeper.KeeperException) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart)
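
start() blocks until the initial set of workers known to ZooKeeper has been added: the counter begins at 1 (for the INITIALIZED event), each CHILD_ADDED callback increments it before the asynchronous addWorker completes, and every completion decrements it. A CountDownLatch does not fit because the total is not known up front. The hypothetical DynamicCountdown below isolates that wait pattern; it is an illustration, not a Druid class.

import org.apache.commons.lang.mutable.MutableInt;

// Hypothetical sketch, not Druid code: a countdown whose total can still grow while
// callers are already waiting, mirroring the waitingFor/waitingForMonitor pair above.
public class DynamicCountdown {

    private final MutableInt waitingFor = new MutableInt(1);
    private final Object monitor = new Object();

    public void increment() {
        synchronized (monitor) {
            waitingFor.increment();
        }
    }

    public void decrement() {
        synchronized (monitor) {
            waitingFor.decrement();
            monitor.notifyAll();
        }
    }

    public void awaitZero() throws InterruptedException {
        synchronized (monitor) {
            while (waitingFor.intValue() > 0) {
                monitor.wait();
            }
        }
    }
}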

Aggregations

LifecycleStart (org.apache.druid.java.util.common.lifecycle.LifecycleStart): 15
ISE (org.apache.druid.java.util.common.ISE): 8
Duration (org.joda.time.Duration): 8
IOException (java.io.IOException): 5
Map (java.util.Map): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
Function (com.google.common.base.Function): 3
File (java.io.File): 3
LifecycleStop (org.apache.druid.java.util.common.lifecycle.LifecycleStop): 3
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2
Maps (com.google.common.collect.Maps): 2
Collection (java.util.Collection): 2
List (java.util.List): 2
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 2
DiscoveryDruidNode (org.apache.druid.discovery.DiscoveryDruidNode): 2
DruidNodeDiscovery (org.apache.druid.discovery.DruidNodeDiscovery): 2
DruidNodeDiscoveryProvider (org.apache.druid.discovery.DruidNodeDiscoveryProvider): 2
DateTimes (org.apache.druid.java.util.common.DateTimes): 2
Pair (org.apache.druid.java.util.common.Pair): 2
ScheduledExecutors (org.apache.druid.java.util.common.concurrent.ScheduledExecutors): 2