
Example 1 with ImmutableWorkerInfo

Use of io.druid.indexing.overlord.ImmutableWorkerInfo in project druid by druid-io.

From class EqualDistributionWorkerSelectStrategy, method findWorkerForTask:

@Override
public Optional<ImmutableWorkerInfo> findWorkerForTask(WorkerTaskRunnerConfig config, ImmutableMap<String, ImmutableWorkerInfo> zkWorkers, Task task) {
    // Sort workers by current capacity used, least-loaded first (the leading minus sign
    // cancels the swapped arguments); ties are broken by preferring the newer worker
    // version. Note that two workers with identical capacity used and version compare
    // as equal and therefore collapse into a single TreeSet entry.
    final TreeSet<ImmutableWorkerInfo> sortedWorkers = Sets.newTreeSet(new Comparator<ImmutableWorkerInfo>() {

        @Override
        public int compare(ImmutableWorkerInfo zkWorker, ImmutableWorkerInfo zkWorker2) {
            int retVal = -Ints.compare(zkWorker2.getCurrCapacityUsed(), zkWorker.getCurrCapacityUsed());
            if (retVal == 0) {
                retVal = zkWorker2.getWorker().getVersion().compareTo(zkWorker.getWorker().getVersion());
            }
            return retVal;
        }
    });
    sortedWorkers.addAll(zkWorkers.values());
    final String minWorkerVer = config.getMinWorkerVersion();
    // Return the least-loaded worker that has free capacity for this task and meets
    // the minimum version requirement.
    for (ImmutableWorkerInfo zkWorker : sortedWorkers) {
        if (zkWorker.canRunTask(task) && zkWorker.isValidVersion(minWorkerVer)) {
            return Optional.of(zkWorker);
        }
    }
    return Optional.absent();
}
Also used : ImmutableWorkerInfo(io.druid.indexing.overlord.ImmutableWorkerInfo)
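
For orientation, here is a minimal sketch (not taken from the project) of how this strategy might be exercised in a test. The Worker and ImmutableWorkerInfo constructor signatures and config defaults are assumed from the 0.9.x io.druid codebase and may differ between versions; the hosts, versions, and capacities are illustrative.

@Test
public void selectsLeastLoadedWorker() {
    EqualDistributionWorkerSelectStrategy strategy = new EqualDistributionWorkerSelectStrategy();
    // Assumed signatures: Worker(host, ip, capacity, version) and
    // ImmutableWorkerInfo(worker, currCapacityUsed, availabilityGroups, runningTasks, lastCompletedTaskTime).
    ImmutableWorkerInfo busy = new ImmutableWorkerInfo(
        new Worker("10.0.0.1", "10.0.0.1", 10, "v1"), 9,
        Sets.<String>newHashSet(), Sets.<String>newHashSet(), new DateTime());
    ImmutableWorkerInfo idle = new ImmutableWorkerInfo(
        new Worker("10.0.0.2", "10.0.0.2", 10, "v1"), 1,
        Sets.<String>newHashSet(), Sets.<String>newHashSet(), new DateTime());
    Optional<ImmutableWorkerInfo> selected = strategy.findWorkerForTask(
        new RemoteTaskRunnerConfig(),
        ImmutableMap.of("10.0.0.1", busy, "10.0.0.2", idle),
        NoopTask.create());
    // Under equal distribution the least-loaded eligible worker should be chosen.
    Assert.assertEquals("10.0.0.2", selected.get().getWorker().getHost());
}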

Example 2 with ImmutableWorkerInfo

Use of io.druid.indexing.overlord.ImmutableWorkerInfo in project druid by druid-io.

From class FillCapacityWithAffinityWorkerSelectStrategy, method findWorkerForTask:

@Override
public Optional<ImmutableWorkerInfo> findWorkerForTask(final WorkerTaskRunnerConfig config, final ImmutableMap<String, ImmutableWorkerInfo> zkWorkers, final Task task) {
    // don't run other datasources on affinity workers; we only want our configured datasources to run on them
    ImmutableMap.Builder<String, ImmutableWorkerInfo> builder = new ImmutableMap.Builder<>();
    for (String workerHost : zkWorkers.keySet()) {
        if (!affinityWorkerHosts.contains(workerHost)) {
            builder.put(workerHost, zkWorkers.get(workerHost));
        }
    }
    ImmutableMap<String, ImmutableWorkerInfo> eligibleWorkers = builder.build();
    // Preferred hosts configured for this task's datasource, if any.
    List<String> workerHosts = affinityConfig.getAffinity().get(task.getDataSource());
    if (workerHosts == null) {
        return super.findWorkerForTask(config, eligibleWorkers, task);
    }
    // Keep only the configured affinity hosts that are actually connected.
    ImmutableMap.Builder<String, ImmutableWorkerInfo> affinityBuilder = new ImmutableMap.Builder<>();
    for (String workerHost : workerHosts) {
        ImmutableWorkerInfo zkWorker = zkWorkers.get(workerHost);
        if (zkWorker != null) {
            affinityBuilder.put(workerHost, zkWorker);
        }
    }
    ImmutableMap<String, ImmutableWorkerInfo> affinityWorkers = affinityBuilder.build();
    // Try the affinity workers first; fall back to the non-affinity pool if none of
    // them can take the task.
    if (!affinityWorkers.isEmpty()) {
        Optional<ImmutableWorkerInfo> retVal = super.findWorkerForTask(config, affinityWorkers, task);
        if (retVal.isPresent()) {
            return retVal;
        }
    }
    return super.findWorkerForTask(config, eligibleWorkers, task);
}
Also used : ImmutableMap(com.google.common.collect.ImmutableMap) ImmutableWorkerInfo(io.druid.indexing.overlord.ImmutableWorkerInfo)
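
The affinityWorkerHosts and affinityConfig used above come from the strategy's configuration. A small sketch of how the strategy might be constructed in a test (constructor signatures are assumed from the 0.9.x io.druid codebase; the datasource and host names are made up):

FillCapacityWithAffinityConfig affinityConfig = new FillCapacityWithAffinityConfig(
    ImmutableMap.<String, List<String>>of("events", Lists.newArrayList("10.0.0.1")));
FillCapacityWithAffinityWorkerSelectStrategy strategy =
    new FillCapacityWithAffinityWorkerSelectStrategy(affinityConfig);
// Tasks for the "events" datasource are offered to 10.0.0.1 first; tasks for any other
// datasource skip the affinity hosts entirely and use the remaining workers.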

Example 3 with ImmutableWorkerInfo

Use of io.druid.indexing.overlord.ImmutableWorkerInfo in project druid by druid-io.

From class PendingTaskBasedWorkerResourceManagementStrategy, method doTerminate:

@Override
public boolean doTerminate(WorkerTaskRunner runner) {
    Collection<ImmutableWorkerInfo> zkWorkers = runner.getWorkers();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }
        // Never terminate while a provision request is still outstanding.
        if (!currentlyProvisioning.isEmpty()) {
            log.debug("Already provisioning nodes, Not Terminating any nodes.");
            return false;
        }
        boolean didTerminate = false;
        // Prune currentlyTerminating down to the node ids that still appear among the
        // lazy workers.
        final Collection<String> workerNodeIds = getWorkerNodeIDs(runner.getLazyWorkers(), workerConfig);
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);
        if (currentlyTerminating.isEmpty()) {
            // Nothing is in flight: mark up to maxWorkersToTerminate idle ("lazy")
            // workers and hand their IPs to the autoscaler for termination.
            final int maxWorkersToTerminate = maxWorkersToTerminate(zkWorkers, workerConfig);
            final Predicate<ImmutableWorkerInfo> isLazyWorker = ResourceManagementUtil.createLazyWorkerPredicate(config);
            final List<String> laziestWorkerIps = Lists.newArrayList(Collections2.transform(runner.markWorkersLazy(isLazyWorker, maxWorkersToTerminate), new Function<Worker, String>() {

                @Override
                public String apply(Worker zkWorker) {
                    return zkWorker.getIp();
                }
            }));
            if (laziestWorkerIps.isEmpty()) {
                log.debug("Found no lazy workers");
            } else {
                log.info("Terminating %,d lazy workers: %s", laziestWorkerIps.size(), Joiner.on(", ").join(laziestWorkerIps));
                final AutoScalingData terminated = workerConfig.getAutoScaler().terminate(laziestWorkerIps);
                if (terminated != null) {
                    currentlyTerminating.addAll(terminated.getNodeIds());
                    lastTerminateTime = new DateTime();
                    scalingStats.addTerminateEvent(terminated);
                    didTerminate = true;
                }
            }
        } else {
            // A previous terminate request is still pending; alert and stop waiting on it
            // once it has been pending longer than the configured maximum scaling duration.
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());
            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);
            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!").addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis()).addData("terminatingCount", currentlyTerminating.size()).emit();
                currentlyTerminating.clear();
            }
        }
        return didTerminate;
    }
}
Also used : Duration(org.joda.time.Duration) ImmutableWorkerInfo(io.druid.indexing.overlord.ImmutableWorkerInfo) DateTime(org.joda.time.DateTime) WorkerBehaviorConfig(io.druid.indexing.overlord.setup.WorkerBehaviorConfig) Function(com.google.common.base.Function) Worker(io.druid.indexing.worker.Worker)
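
The lazy-worker predicate produced by ResourceManagementUtil.createLazyWorkerPredicate is what markWorkersLazy applies to each worker. A simplified sketch of such a predicate is below; this is an assumption for illustration only, not the project's actual implementation, which also takes config-driven validity checks into account.

Predicate<ImmutableWorkerInfo> isLazyWorker = new Predicate<ImmutableWorkerInfo>() {
    @Override
    public boolean apply(ImmutableWorkerInfo worker) {
        // Treat a worker as "lazy" when it is running nothing and can be terminated.
        return worker.getRunningTasks().isEmpty();
    }
};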

Example 4 with ImmutableWorkerInfo

Use of io.druid.indexing.overlord.ImmutableWorkerInfo in project druid by druid-io.

From class SimpleWorkerResourceManagementStrategy, method doProvision:

protected boolean doProvision(WorkerTaskRunner runner) {
    Collection<? extends TaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    Collection<ImmutableWorkerInfo> workers = getWorkers(runner);
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.warn("No workerConfig available, cannot provision new workers.");
            return false;
        }
        // Count the workers that currently pass the validity predicate.
        final Predicate<ImmutableWorkerInfo> isValidWorker = ResourceManagementUtil.createValidWorkerPredicate(config);
        final int currValidWorkers = Collections2.filter(workers, isValidWorker).size();
        final List<String> workerNodeIds = workerConfig.getAutoScaler().ipToIdLookup(Lists.newArrayList(Iterables.transform(workers, new Function<ImmutableWorkerInfo, String>() {

            @Override
            public String apply(ImmutableWorkerInfo input) {
                return input.getWorker().getIp();
            }
        })));
        // Any node id that now corresponds to a live worker has finished provisioning.
        currentlyProvisioning.removeAll(workerNodeIds);
        updateTargetWorkerCount(workerConfig, pendingTasks, workers);
        // Provision whatever is still missing after counting valid workers and the
        // nodes already on their way.
        int want = targetWorkerCount - (currValidWorkers + currentlyProvisioning.size());
        while (want > 0) {
            final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
            final List<String> newNodes;
            if (provisioned == null || (newNodes = provisioned.getNodeIds()).isEmpty()) {
                break;
            } else {
                currentlyProvisioning.addAll(newNodes);
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);
                want -= provisioned.getNodeIds().size();
                didProvision = true;
            }
        }
        if (!currentlyProvisioning.isEmpty()) {
            // If a provision request has been outstanding longer than the maximum scaling
            // duration, alert, terminate the stuck node ids, and clear the bookkeeping.
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());
            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);
            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!").addData("millisSinceLastProvision", durSinceLastProvision.getMillis()).addData("provisioningCount", currentlyProvisioning.size()).emit();
                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }
        return didProvision;
    }
}
Also used : Duration(org.joda.time.Duration) ImmutableWorkerInfo(io.druid.indexing.overlord.ImmutableWorkerInfo) DateTime(org.joda.time.DateTime) WorkerBehaviorConfig(io.druid.indexing.overlord.setup.WorkerBehaviorConfig)
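
The provisioning arithmetic above can be read with concrete numbers (illustrative only):

// Suppose updateTargetWorkerCount decided on a pool of 6 workers, 3 workers currently
// pass the validity predicate, and 1 node id is still in currentlyProvisioning:
int want = 6 - (3 + 1);
// want == 2, so the loop asks the autoscaler for new nodes until the gap is closed
// or provision() stops returning node ids.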

Example 5 with ImmutableWorkerInfo

Use of io.druid.indexing.overlord.ImmutableWorkerInfo in project druid by druid-io.

From class JavaScriptWorkerSelectStrategyTest, method testFillWorkerCapacity:

@Test
public void testFillWorkerCapacity() {
    // tasks should be assigned to the worker with the highest currCapacityUsed until it is full
    ImmutableMap<String, ImmutableWorkerInfo> workerMap = ImmutableMap.of("10.0.0.1", createMockWorker(1, true, true), "10.0.0.2", createMockWorker(5, true, true));
    Optional<ImmutableWorkerInfo> workerForBatchTask = STRATEGY.findWorkerForTask(new TestRemoteTaskRunnerConfig(new Period("PT1S")), workerMap, createMockTask("index_hadoop"));
    Assert.assertEquals(workerMap.get("10.0.0.2"), workerForBatchTask.get());
}
Also used : TestRemoteTaskRunnerConfig(io.druid.indexing.overlord.TestRemoteTaskRunnerConfig) Period(org.joda.time.Period) ImmutableWorkerInfo(io.druid.indexing.overlord.ImmutableWorkerInfo) Test(org.junit.Test)
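
createMockWorker and createMockTask are test helpers defined elsewhere in this test class. Below is a hypothetical shape for createMockWorker, inferred only from its call sites here; the real helper builds its mocks with EasyMock, and the exact parameters may differ.

private ImmutableWorkerInfo createMockWorker(int currCapacityUsed, boolean canRunTask, boolean isValidVersion) {
    ImmutableWorkerInfo worker = EasyMock.createMock(ImmutableWorkerInfo.class);
    EasyMock.expect(worker.getCurrCapacityUsed()).andReturn(currCapacityUsed).anyTimes();
    EasyMock.expect(worker.canRunTask(EasyMock.anyObject(Task.class))).andReturn(canRunTask).anyTimes();
    EasyMock.expect(worker.isValidVersion(EasyMock.anyString())).andReturn(isValidVersion).anyTimes();
    EasyMock.replay(worker);
    return worker;
}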

Aggregations

ImmutableWorkerInfo (io.druid.indexing.overlord.ImmutableWorkerInfo): 29
Test (org.junit.Test): 20
Worker (io.druid.indexing.worker.Worker): 10
RemoteTaskRunner (io.druid.indexing.overlord.RemoteTaskRunner): 9
RemoteTaskRunnerConfig (io.druid.indexing.overlord.config.RemoteTaskRunnerConfig): 9
DateTime (org.joda.time.DateTime): 9
NoopTask (io.druid.indexing.common.task.NoopTask): 8
RemoteTaskRunnerWorkItem (io.druid.indexing.overlord.RemoteTaskRunnerWorkItem): 5
TestRemoteTaskRunnerConfig (io.druid.indexing.overlord.TestRemoteTaskRunnerConfig): 5
Period (org.joda.time.Period): 5
Task (io.druid.indexing.common.task.Task): 4
WorkerBehaviorConfig (io.druid.indexing.overlord.setup.WorkerBehaviorConfig): 4
Duration (org.joda.time.Duration): 4
Function (com.google.common.base.Function): 3
List (java.util.List): 3
ZkWorker (io.druid.indexing.overlord.ZkWorker): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
WorkerSelectStrategy (io.druid.indexing.overlord.setup.WorkerSelectStrategy): 1