
Example 1 with TaskState

Use of org.apache.druid.indexer.TaskState in project druid by druid-io.

From class ParallelIndexSupervisorTask, method runSinglePhaseParallel.

/**
 * Run the single phase parallel indexing for best-effort rollup. In this mode, each sub task created by
 * the supervisor task reads data and generates segments individually.
 */
private TaskStatus runSinglePhaseParallel(TaskToolbox toolbox) throws Exception {
    ingestionState = IngestionState.BUILD_SEGMENTS;
    ParallelIndexTaskRunner<SinglePhaseSubTask, PushedSegmentsReport> parallelSinglePhaseRunner = createRunner(toolbox, this::createSinglePhaseTaskRunner);
    final TaskState state = runNextPhase(parallelSinglePhaseRunner);
    TaskStatus taskStatus;
    if (state.isSuccess()) {
        // noinspection ConstantConditions
        publishSegments(toolbox, parallelSinglePhaseRunner.getReports());
        if (awaitSegmentAvailabilityTimeoutMillis > 0) {
            waitForSegmentAvailability(parallelSinglePhaseRunner.getReports());
        }
        taskStatus = TaskStatus.success(getId());
    } else {
        // After running, the only possible terminal states are success or failure.
        Preconditions.checkState(state.isFailure(), "Unrecognized state after task is complete[%s]", state);
        final String errorMessage;
        if (parallelSinglePhaseRunner.getStopReason() != null) {
            errorMessage = parallelSinglePhaseRunner.getStopReason();
        } else {
            errorMessage = StringUtils.format(TASK_PHASE_FAILURE_MSG, parallelSinglePhaseRunner.getName());
        }
        taskStatus = TaskStatus.failure(getId(), errorMessage);
    }
    toolbox.getTaskReportFileWriter().write(getId(), getTaskCompletionReports(taskStatus, segmentAvailabilityConfirmationCompleted));
    return taskStatus;
}
Also used : TaskStatus(org.apache.druid.indexer.TaskStatus) TaskState(org.apache.druid.indexer.TaskState)
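
The branching above is the core TaskState idiom in these examples: once a phase finishes, its state is terminal and must be either success or failure. A minimal sketch of that mapping, using the same Druid calls as the example (the helper class and method names are hypothetical):

import com.google.common.base.Preconditions;
import org.apache.druid.indexer.TaskState;
import org.apache.druid.indexer.TaskStatus;

public final class TaskStateExample {
    // Hypothetical helper mirroring the success/failure branching in runSinglePhaseParallel.
    static TaskStatus toTaskStatus(TaskState state, String taskId, String failureMessage) {
        if (state.isSuccess()) {
            return TaskStatus.success(taskId);
        }
        // A completed phase can only be SUCCESS or FAILED; anything else is a bug.
        Preconditions.checkState(state.isFailure(), "Unrecognized state after task is complete[%s]", state);
        return TaskStatus.failure(taskId, failureMessage);
    }
}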

Example 2 with TaskState

Use of org.apache.druid.indexer.TaskState in project druid by druid-io.

From class ParallelIndexSupervisorTask, method runHashPartitionMultiPhaseParallel.

@VisibleForTesting
TaskStatus runHashPartitionMultiPhaseParallel(TaskToolbox toolbox) throws Exception {
    TaskState state;
    ParallelIndexIngestionSpec ingestionSchemaToUse = ingestionSchema;
    if (!(ingestionSchema.getTuningConfig().getPartitionsSpec() instanceof HashedPartitionsSpec)) {
        // only range and hash partitioning are supported for multiphase parallel ingestion; see runMultiPhaseParallel()
        throw new ISE("forceGuaranteedRollup is set but partitionsSpec [%s] is not a single_dim or hash partition spec.", ingestionSchema.getTuningConfig().getPartitionsSpec());
    }
    final Map<Interval, Integer> intervalToNumShards;
    HashedPartitionsSpec partitionsSpec = (HashedPartitionsSpec) ingestionSchema.getTuningConfig().getPartitionsSpec();
    final boolean needsInputSampling = partitionsSpec.getNumShards() == null || ingestionSchemaToUse.getDataSchema().getGranularitySpec().inputIntervals().isEmpty();
    if (needsInputSampling) {
        // 0. need to determine intervals and numShards by scanning the data
        LOG.info("Needs to determine intervals or numShards, beginning %s phase.", PartialDimensionCardinalityTask.TYPE);
        ParallelIndexTaskRunner<PartialDimensionCardinalityTask, DimensionCardinalityReport> cardinalityRunner = createRunner(toolbox, this::createPartialDimensionCardinalityRunner);
        state = runNextPhase(cardinalityRunner);
        if (state.isFailure()) {
            String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, cardinalityRunner.getName());
            return TaskStatus.failure(getId(), errMsg);
        }
        if (cardinalityRunner.getReports().isEmpty()) {
            String msg = "No valid rows for hash partitioning." + " All rows may have invalid timestamps or have been filtered out.";
            LOG.warn(msg);
            return TaskStatus.success(getId(), msg);
        }
        if (partitionsSpec.getNumShards() == null) {
            int effectiveMaxRowsPerSegment = partitionsSpec.getMaxRowsPerSegment() == null ? PartitionsSpec.DEFAULT_MAX_ROWS_PER_SEGMENT : partitionsSpec.getMaxRowsPerSegment();
            LOG.info("effective maxRowsPerSegment is: " + effectiveMaxRowsPerSegment);
            intervalToNumShards = determineNumShardsFromCardinalityReport(cardinalityRunner.getReports().values(), effectiveMaxRowsPerSegment);
        } else {
            intervalToNumShards = CollectionUtils.mapValues(mergeCardinalityReports(cardinalityRunner.getReports().values()), k -> partitionsSpec.getNumShards());
        }
        ingestionSchemaToUse = rewriteIngestionSpecWithIntervalsIfMissing(ingestionSchemaToUse, intervalToNumShards.keySet());
    } else {
        // numShards will be determined in PartialHashSegmentGenerateTask
        intervalToNumShards = null;
    }
    // 1. Partial segment generation phase
    final ParallelIndexIngestionSpec segmentCreateIngestionSpec = ingestionSchemaToUse;
    ParallelIndexTaskRunner<PartialHashSegmentGenerateTask, GeneratedPartitionsReport> indexingRunner = createRunner(toolbox, f -> createPartialHashSegmentGenerateRunner(toolbox, segmentCreateIngestionSpec, intervalToNumShards));
    state = runNextPhase(indexingRunner);
    if (state.isFailure()) {
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, indexingRunner.getName());
        return TaskStatus.failure(getId(), errMsg);
    }
    // 2. Partial segment merge phase
    // partition (interval, partitionId) -> partition locations
    Map<Partition, List<PartitionLocation>> partitionToLocations = getPartitionToLocations(indexingRunner.getReports());
    final List<PartialSegmentMergeIOConfig> ioConfigs = createGenericMergeIOConfigs(ingestionSchema.getTuningConfig().getTotalNumMergeTasks(), partitionToLocations);
    final ParallelIndexIngestionSpec segmentMergeIngestionSpec = ingestionSchemaToUse;
    final ParallelIndexTaskRunner<PartialGenericSegmentMergeTask, PushedSegmentsReport> mergeRunner = createRunner(toolbox, tb -> createPartialGenericSegmentMergeRunner(tb, ioConfigs, segmentMergeIngestionSpec));
    state = runNextPhase(mergeRunner);
    TaskStatus taskStatus;
    if (state.isSuccess()) {
        // noinspection ConstantConditions
        publishSegments(toolbox, mergeRunner.getReports());
        if (awaitSegmentAvailabilityTimeoutMillis > 0) {
            waitForSegmentAvailability(mergeRunner.getReports());
        }
        taskStatus = TaskStatus.success(getId());
    } else {
        // After running, the only possible terminal states are success or failure.
        Preconditions.checkState(state.isFailure(), "Unrecognized state after task is complete[%s]", state);
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, mergeRunner.getName());
        taskStatus = TaskStatus.failure(getId(), errMsg);
    }
    toolbox.getTaskReportFileWriter().write(getId(), getTaskCompletionReports(taskStatus, segmentAvailabilityConfirmationCompleted));
    return taskStatus;
}
Also used : HashedPartitionsSpec(org.apache.druid.indexer.partitions.HashedPartitionsSpec) ISE(org.apache.druid.java.util.common.ISE) ArrayList(java.util.ArrayList) List(java.util.List) TaskStatus(org.apache.druid.indexer.TaskStatus) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TaskState(org.apache.druid.indexer.TaskState) Interval(org.joda.time.Interval) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
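
Each multi-phase method repeats the same control flow: run a phase, and if its TaskState is a failure, stop and return a failed TaskStatus immediately. A sketch of that short-circuiting loop, with Supplier standing in for the runNextPhase(...) calls above (class and method names are hypothetical):

import java.util.List;
import java.util.function.Supplier;
import org.apache.druid.indexer.TaskState;
import org.apache.druid.indexer.TaskStatus;

public final class PhaseChainExample {
    // Hypothetical sketch: each Supplier represents one runNextPhase(runner) call.
    static TaskStatus runPhases(String taskId, List<Supplier<TaskState>> phases) {
        for (Supplier<TaskState> phase : phases) {
            TaskState state = phase.get();
            if (state.isFailure()) {
                // Mirrors the early returns above: the first failed phase fails the whole task.
                return TaskStatus.failure(taskId, "a phase failed");
            }
        }
        return TaskStatus.success(taskId);
    }
}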

Example 3 with TaskState

Use of org.apache.druid.indexer.TaskState in project druid by druid-io.

From class HttpRemoteTaskRunner, method taskComplete.

// CAUTION: This method calls RemoteTaskRunnerWorkItem.setResult(..) which results in TaskQueue.notifyStatus() being called
// because that is attached by TaskQueue to task result future. So, this method must not be called with "statusLock"
// held. See https://github.com/apache/druid/issues/6201
private void taskComplete(HttpRemoteTaskRunnerWorkItem taskRunnerWorkItem, WorkerHolder workerHolder, TaskStatus taskStatus) {
    Preconditions.checkState(!Thread.holdsLock(statusLock), "Current thread must not hold statusLock.");
    Preconditions.checkNotNull(taskRunnerWorkItem, "taskRunnerWorkItem");
    Preconditions.checkNotNull(taskStatus, "taskStatus");
    if (workerHolder != null) {
        log.info("Worker[%s] completed task[%s] with status[%s]", workerHolder.getWorker().getHost(), taskStatus.getId(), taskStatus.getStatusCode());
        // Worker is done with this task
        workerHolder.setLastCompletedTaskTime(DateTimes.nowUtc());
    }
    if (taskRunnerWorkItem.getResult().isDone()) {
        // This is not the first complete event.
        try {
            TaskState lastKnownState = taskRunnerWorkItem.getResult().get().getStatusCode();
            if (taskStatus.getStatusCode() != lastKnownState) {
                log.warn("The state of the new task complete event is different from its last known state. " + "New state[%s], last known state[%s]", taskStatus.getStatusCode(), lastKnownState);
            }
        } catch (InterruptedException e) {
            log.warn(e, "Interrupted while getting the last known task status.");
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            // This case should not really happen.
            log.warn(e, "Failed to get the last known task status. Ignoring this failure.");
        }
    } else {
        // Notify interested parties
        taskRunnerWorkItem.setResult(taskStatus);
        TaskRunnerUtils.notifyStatusChanged(listeners, taskStatus.getId(), taskStatus);
        // Update success/failure counters, Blacklist node if there are too many failures.
        if (workerHolder != null) {
            blacklistWorkerIfNeeded(taskStatus, workerHolder);
        }
    }
    synchronized (statusLock) {
        statusLock.notifyAll();
    }
}
Also used : ExecutionException(java.util.concurrent.ExecutionException) TaskState(org.apache.druid.indexer.TaskState) RunnerTaskState(org.apache.druid.indexer.RunnerTaskState)
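
The isDone() branch guards against duplicate completion events: the first recorded TaskStatus wins, and a later event with a different TaskState is only logged, never applied. A condensed sketch of that guard (the helper is hypothetical; getStatusCode() returning a TaskState matches the example above):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.druid.indexer.TaskState;
import org.apache.druid.indexer.TaskStatus;

public final class DuplicateCompletionExample {
    // Hypothetical helper: returns true if a later completion event diverges from the recorded result.
    static boolean divergesFromRecordedResult(Future<TaskStatus> result, TaskStatus newStatus) {
        if (!result.isDone()) {
            return false; // first completion event; nothing recorded yet
        }
        try {
            TaskState lastKnownState = result.get().getStatusCode();
            return newStatus.getStatusCode() != lastKnownState;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        } catch (ExecutionException e) {
            return false; // the recorded result failed exceptionally; keep it
        }
    }
}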

Example 4 with TaskState

Use of org.apache.druid.indexer.TaskState in project druid by druid-io.

From class RemoteTaskRunner, method taskComplete.

private void taskComplete(RemoteTaskRunnerWorkItem taskRunnerWorkItem, @Nullable ZkWorker zkWorker, TaskStatus taskStatus) {
    Preconditions.checkNotNull(taskRunnerWorkItem, "taskRunnerWorkItem");
    Preconditions.checkNotNull(taskStatus, "taskStatus");
    if (zkWorker != null) {
        log.info("Worker[%s] completed task[%s] with status[%s]", zkWorker.getWorker().getHost(), taskStatus.getId(), taskStatus.getStatusCode());
        // Worker is done with this task
        zkWorker.setLastCompletedTaskTime(DateTimes.nowUtc());
    } else {
        log.info("Workerless task[%s] completed with status[%s]", taskStatus.getId(), taskStatus.getStatusCode());
    }
    // Move from running -> complete
    // If the task was running and this is the first complete event,
    // previousComplete should be null and removedRunning should not.
    final RemoteTaskRunnerWorkItem previousComplete = completeTasks.put(taskStatus.getId(), taskRunnerWorkItem);
    final RemoteTaskRunnerWorkItem removedRunning = runningTasks.remove(taskStatus.getId());
    if (previousComplete != null && removedRunning != null) {
        log.warn("This is not the first complete event for task[%s], but it was still known as running. " + "Ignoring the previously known running status.", taskStatus.getId());
    }
    if (previousComplete != null) {
        // This is not the first complete event for the same task.
        try {
            // getResult().get() must return immediately.
            TaskState lastKnownState = previousComplete.getResult().get(1, TimeUnit.MILLISECONDS).getStatusCode();
            if (taskStatus.getStatusCode() != lastKnownState) {
                log.warn("The state of the new task complete event is different from its last known state. " + "New state[%s], last known state[%s]", taskStatus.getStatusCode(), lastKnownState);
            }
        } catch (InterruptedException e) {
            log.warn(e, "Interrupted while getting the last known task status.");
            Thread.currentThread().interrupt();
        } catch (ExecutionException | TimeoutException e) {
            // This case should not really happen.
            log.warn(e, "Failed to get the last known task status. Ignoring this failure.");
        }
    } else {
        // Update success/failure counters
        if (zkWorker != null) {
            if (taskStatus.isSuccess()) {
                zkWorker.resetContinuouslyFailedTasksCount();
                if (blackListedWorkers.remove(zkWorker)) {
                    zkWorker.setBlacklistedUntil(null);
                    log.info("[%s] removed from blacklist because a task finished with SUCCESS", zkWorker.getWorker());
                }
            } else if (taskStatus.isFailure()) {
                zkWorker.incrementContinuouslyFailedTasksCount();
            }
            // Blacklist node if there are too many failures.
            synchronized (blackListedWorkers) {
                if (zkWorker.getContinuouslyFailedTasksCount() > config.getMaxRetriesBeforeBlacklist() && blackListedWorkers.size() <= zkWorkers.size() * (config.getMaxPercentageBlacklistWorkers() / 100.0) - 1) {
                    zkWorker.setBlacklistedUntil(DateTimes.nowUtc().plus(config.getWorkerBlackListBackoffTime()));
                    if (blackListedWorkers.add(zkWorker)) {
                        log.info("Blacklisting [%s] until [%s] after [%,d] failed tasks in a row.", zkWorker.getWorker(), zkWorker.getBlacklistedUntil(), zkWorker.getContinuouslyFailedTasksCount());
                    }
                }
            }
        }
        // Notify interested parties
        taskRunnerWorkItem.setResult(taskStatus);
        TaskRunnerUtils.notifyStatusChanged(listeners, taskStatus.getId(), taskStatus);
    }
}
Also used : ExecutionException(java.util.concurrent.ExecutionException) TaskState(org.apache.druid.indexer.TaskState) RunnerTaskState(org.apache.druid.indexer.RunnerTaskState) TimeoutException(java.util.concurrent.TimeoutException)
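
The blacklist condition combines a per-worker failure threshold with a fleet-wide cap: a worker is blacklisted only if its consecutive failures exceed maxRetriesBeforeBlacklist and blacklisting it keeps the blacklist at or below maxPercentageBlacklistWorkers of all workers. Extracted as a pure function (parameter names are hypothetical; the arithmetic mirrors the check above):

public final class BlacklistCheckExample {
    // Hypothetical stand-alone version of the synchronized blacklist check in taskComplete.
    static boolean shouldBlacklist(
        int continuouslyFailedTasks,
        int maxRetriesBeforeBlacklist,
        int currentlyBlacklisted,
        int totalWorkers,
        double maxPercentageBlacklistWorkers
    ) {
        return continuouslyFailedTasks > maxRetriesBeforeBlacklist
            && currentlyBlacklisted <= totalWorkers * (maxPercentageBlacklistWorkers / 100.0) - 1;
    }
}

For example, with 10 workers and maxPercentageBlacklistWorkers = 20, the second condition holds only while currentlyBlacklisted <= 1, so at most 2 workers (20% of 10) can be blacklisted at once.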

Example 5 with TaskState

Use of org.apache.druid.indexer.TaskState in project druid by druid-io.

From class ParallelIndexSupervisorTask, method runRangePartitionMultiPhaseParallel.

@VisibleForTesting
TaskStatus runRangePartitionMultiPhaseParallel(TaskToolbox toolbox) throws Exception {
    ParallelIndexIngestionSpec ingestionSchemaToUse = ingestionSchema;
    ParallelIndexTaskRunner<PartialDimensionDistributionTask, DimensionDistributionReport> distributionRunner = createRunner(toolbox, this::createPartialDimensionDistributionRunner);
    TaskState distributionState = runNextPhase(distributionRunner);
    if (distributionState.isFailure()) {
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, distributionRunner.getName());
        return TaskStatus.failure(getId(), errMsg);
    }
    Map<Interval, PartitionBoundaries> intervalToPartitions = determineAllRangePartitions(distributionRunner.getReports().values());
    if (intervalToPartitions.isEmpty()) {
        String msg = "No valid rows for single dimension partitioning." + " All rows may have invalid timestamps or multiple dimension values.";
        LOG.warn(msg);
        return TaskStatus.success(getId(), msg);
    }
    ingestionSchemaToUse = rewriteIngestionSpecWithIntervalsIfMissing(ingestionSchemaToUse, intervalToPartitions.keySet());
    final ParallelIndexIngestionSpec segmentCreateIngestionSpec = ingestionSchemaToUse;
    ParallelIndexTaskRunner<PartialRangeSegmentGenerateTask, GeneratedPartitionsReport> indexingRunner = createRunner(toolbox, tb -> createPartialRangeSegmentGenerateRunner(tb, intervalToPartitions, segmentCreateIngestionSpec));
    TaskState indexingState = runNextPhase(indexingRunner);
    if (indexingState.isFailure()) {
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, indexingRunner.getName());
        return TaskStatus.failure(getId(), errMsg);
    }
    // partition (interval, partitionId) -> partition locations
    Map<Partition, List<PartitionLocation>> partitionToLocations = getPartitionToLocations(indexingRunner.getReports());
    final List<PartialSegmentMergeIOConfig> ioConfigs = createGenericMergeIOConfigs(ingestionSchema.getTuningConfig().getTotalNumMergeTasks(), partitionToLocations);
    final ParallelIndexIngestionSpec segmentMergeIngestionSpec = ingestionSchemaToUse;
    ParallelIndexTaskRunner<PartialGenericSegmentMergeTask, PushedSegmentsReport> mergeRunner = createRunner(toolbox, tb -> createPartialGenericSegmentMergeRunner(tb, ioConfigs, segmentMergeIngestionSpec));
    TaskState mergeState = runNextPhase(mergeRunner);
    TaskStatus taskStatus;
    if (mergeState.isSuccess()) {
        publishSegments(toolbox, mergeRunner.getReports());
        if (awaitSegmentAvailabilityTimeoutMillis > 0) {
            waitForSegmentAvailability(mergeRunner.getReports());
        }
        taskStatus = TaskStatus.success(getId());
    } else {
        // After running, the only possible terminal states are success or failure.
        Preconditions.checkState(mergeState.isFailure(), "Unrecognized state after task is complete[%s]", mergeState);
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, mergeRunner.getName());
        taskStatus = TaskStatus.failure(getId(), errMsg);
    }
    toolbox.getTaskReportFileWriter().write(getId(), getTaskCompletionReports(taskStatus, segmentAvailabilityConfirmationCompleted));
    return taskStatus;
}
Also used : TaskStatus(org.apache.druid.indexer.TaskStatus) ArrayList(java.util.ArrayList) List(java.util.List) TaskState(org.apache.druid.indexer.TaskState) PartitionBoundaries(org.apache.druid.timeline.partition.PartitionBoundaries) Interval(org.joda.time.Interval) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
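
All five examples rely on the same small contract from TaskState: a terminal-state indicator with success and failure predicates. A conceptual mirror of that contract (an illustrative sketch, not the actual org.apache.druid.indexer.TaskState source):

public enum TaskStateSketch {
    RUNNING, SUCCESS, FAILED;

    // A task is complete once it reaches either terminal state.
    public boolean isComplete() {
        return this == SUCCESS || this == FAILED;
    }

    public boolean isSuccess() {
        return this == SUCCESS;
    }

    public boolean isFailure() {
        return this == FAILED;
    }
}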

Aggregations

TaskState (org.apache.druid.indexer.TaskState) 6
TaskStatus (org.apache.druid.indexer.TaskStatus) 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 2
ArrayList (java.util.ArrayList) 2
List (java.util.List) 2
ExecutionException (java.util.concurrent.ExecutionException) 2
RunnerTaskState (org.apache.druid.indexer.RunnerTaskState) 2
JsonCreator (com.fasterxml.jackson.annotation.JsonCreator) 1
JsonProperty (com.fasterxml.jackson.annotation.JsonProperty) 1
SmileMediaTypes (com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes) 1
Preconditions (com.google.common.base.Preconditions) 1
Throwables (com.google.common.base.Throwables) 1
ArrayListMultimap (com.google.common.collect.ArrayListMultimap) 1
ImmutableMap (com.google.common.collect.ImmutableMap) 1
Multimap (com.google.common.collect.Multimap) 1
IOException (java.io.IOException) 1
Collection (java.util.Collection) 1
Collections (java.util.Collections) 1
Comparator (java.util.Comparator) 1
HashMap (java.util.HashMap) 1