
Example 1 with TaskToolbox

Use of org.apache.druid.indexing.common.TaskToolbox in project druid by druid-io.

The class ParallelIndexSupervisorTask, method runHashPartitionMultiPhaseParallel.

@VisibleForTesting
TaskStatus runHashPartitionMultiPhaseParallel(TaskToolbox toolbox) throws Exception {
    TaskState state;
    ParallelIndexIngestionSpec ingestionSchemaToUse = ingestionSchema;
    if (!(ingestionSchema.getTuningConfig().getPartitionsSpec() instanceof HashedPartitionsSpec)) {
        // only range and hash partitioning are supported for multiphase parallel ingestion; see runMultiPhaseParallel()
        throw new ISE("forceGuaranteedRollup is set but partitionsSpec [%s] is not a single_dim or hash partition spec.", ingestionSchema.getTuningConfig().getPartitionsSpec());
    }
    final Map<Interval, Integer> intervalToNumShards;
    HashedPartitionsSpec partitionsSpec = (HashedPartitionsSpec) ingestionSchema.getTuningConfig().getPartitionsSpec();
    final boolean needsInputSampling = partitionsSpec.getNumShards() == null || ingestionSchemaToUse.getDataSchema().getGranularitySpec().inputIntervals().isEmpty();
    if (needsInputSampling) {
        // 0. need to determine intervals and numShards by scanning the data
        LOG.info("Needs to determine intervals or numShards, beginning %s phase.", PartialDimensionCardinalityTask.TYPE);
        ParallelIndexTaskRunner<PartialDimensionCardinalityTask, DimensionCardinalityReport> cardinalityRunner = createRunner(toolbox, this::createPartialDimensionCardinalityRunner);
        state = runNextPhase(cardinalityRunner);
        if (state.isFailure()) {
            String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, cardinalityRunner.getName());
            return TaskStatus.failure(getId(), errMsg);
        }
        if (cardinalityRunner.getReports().isEmpty()) {
            String msg = "No valid rows for hash partitioning." + " All rows may have invalid timestamps or have been filtered out.";
            LOG.warn(msg);
            return TaskStatus.success(getId(), msg);
        }
        if (partitionsSpec.getNumShards() == null) {
            int effectiveMaxRowsPerSegment = partitionsSpec.getMaxRowsPerSegment() == null ? PartitionsSpec.DEFAULT_MAX_ROWS_PER_SEGMENT : partitionsSpec.getMaxRowsPerSegment();
            LOG.info("effective maxRowsPerSegment is: " + effectiveMaxRowsPerSegment);
            intervalToNumShards = determineNumShardsFromCardinalityReport(cardinalityRunner.getReports().values(), effectiveMaxRowsPerSegment);
        } else {
            intervalToNumShards = CollectionUtils.mapValues(mergeCardinalityReports(cardinalityRunner.getReports().values()), k -> partitionsSpec.getNumShards());
        }
        ingestionSchemaToUse = rewriteIngestionSpecWithIntervalsIfMissing(ingestionSchemaToUse, intervalToNumShards.keySet());
    } else {
        // numShards will be determined in PartialHashSegmentGenerateTask
        intervalToNumShards = null;
    }
    // 1. Partial segment generation phase
    final ParallelIndexIngestionSpec segmentCreateIngestionSpec = ingestionSchemaToUse;
    ParallelIndexTaskRunner<PartialHashSegmentGenerateTask, GeneratedPartitionsReport> indexingRunner = createRunner(toolbox, f -> createPartialHashSegmentGenerateRunner(toolbox, segmentCreateIngestionSpec, intervalToNumShards));
    state = runNextPhase(indexingRunner);
    if (state.isFailure()) {
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, indexingRunner.getName());
        return TaskStatus.failure(getId(), errMsg);
    }
    // 2. Partial segment merge phase
    // partition (interval, partitionId) -> partition locations
    Map<Partition, List<PartitionLocation>> partitionToLocations = getPartitionToLocations(indexingRunner.getReports());
    final List<PartialSegmentMergeIOConfig> ioConfigs = createGenericMergeIOConfigs(ingestionSchema.getTuningConfig().getTotalNumMergeTasks(), partitionToLocations);
    final ParallelIndexIngestionSpec segmentMergeIngestionSpec = ingestionSchemaToUse;
    final ParallelIndexTaskRunner<PartialGenericSegmentMergeTask, PushedSegmentsReport> mergeRunner = createRunner(toolbox, tb -> createPartialGenericSegmentMergeRunner(tb, ioConfigs, segmentMergeIngestionSpec));
    state = runNextPhase(mergeRunner);
    TaskStatus taskStatus;
    if (state.isSuccess()) {
        // noinspection ConstantConditions
        publishSegments(toolbox, mergeRunner.getReports());
        if (awaitSegmentAvailabilityTimeoutMillis > 0) {
            waitForSegmentAvailability(mergeRunner.getReports());
        }
        taskStatus = TaskStatus.success(getId());
    } else {
        // there is only success or failure after running
        Preconditions.checkState(state.isFailure(), "Unrecognized state after task is complete[%s]", state);
        String errMsg = StringUtils.format(TASK_PHASE_FAILURE_MSG, mergeRunner.getName());
        taskStatus = TaskStatus.failure(getId(), errMsg);
    }
    toolbox.getTaskReportFileWriter().write(getId(), getTaskCompletionReports(taskStatus, segmentAvailabilityConfirmationCompleted));
    return taskStatus;
}
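The interesting branch above is how intervalToNumShards gets filled when numShards is unset: the merged cardinality reports give a per-interval distinct-row estimate, and each interval receives enough shards to keep segments under maxRowsPerSegment. Below is a minimal, self-contained sketch of that idea; it is not Druid's actual determineNumShardsFromCardinalityReport, plain Strings stand in for Intervals, and cardinality is assumed to approximate row count.

import java.util.HashMap;
import java.util.Map;

// Hedged sketch: fanning out per-interval shard counts from approximate cardinalities.
// All names here are illustrative stand-ins, not Druid APIs.
public class NumShardsSketch {
    public static void main(String[] args) {
        // Pretend these came from merged cardinality reports, keyed by interval.
        Map<String, Long> intervalToCardinality = new HashMap<>();
        intervalToCardinality.put("2021-01-01/2021-01-02", 5_000_000L);
        intervalToCardinality.put("2021-01-02/2021-01-03", 1_200_000L);

        // Stands in for the effective maxRowsPerSegment computed above.
        long maxRowsPerSegment = 2_000_000L;

        Map<String, Integer> intervalToNumShards = new HashMap<>();
        for (Map.Entry<String, Long> e : intervalToCardinality.entrySet()) {
            // One shard per maxRowsPerSegment worth of rows, rounded up, at least one.
            int numShards = (int) Math.max(1L, (e.getValue() + maxRowsPerSegment - 1) / maxRowsPerSegment);
            intervalToNumShards.put(e.getKey(), numShards);
        }
        System.out.println(intervalToNumShards);
    }
}

With the sample numbers, the 5M-cardinality interval gets 3 shards and the 1.2M one gets 1.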
Also used : ArrayListMultimap(com.google.common.collect.ArrayListMultimap) TaskReport(org.apache.druid.indexing.common.TaskReport) TaskToolbox(org.apache.druid.indexing.common.TaskToolbox) JsonProperty(com.fasterxml.jackson.annotation.JsonProperty) PartitionBoundaries(org.apache.druid.timeline.partition.PartitionBoundaries) Produces(javax.ws.rs.Produces) IngestionState(org.apache.druid.indexer.IngestionState) Pair(org.apache.druid.java.util.common.Pair) MediaType(javax.ws.rs.core.MediaType) TaskActionClient(org.apache.druid.indexing.common.actions.TaskActionClient) SegmentTransactionalInsertAction(org.apache.druid.indexing.common.actions.SegmentTransactionalInsertAction) FiniteFirehoseFactory(org.apache.druid.data.input.FiniteFirehoseFactory) Map(java.util.Map) StringDistribution(org.apache.druid.indexing.common.task.batch.parallel.distribution.StringDistribution) AbstractBatchIndexTask(org.apache.druid.indexing.common.task.AbstractBatchIndexTask) InputFormat(org.apache.druid.data.input.InputFormat) IngestionStatsAndErrorsTaskReportData(org.apache.druid.indexing.common.IngestionStatsAndErrorsTaskReportData) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) TaskState(org.apache.druid.indexer.TaskState) IndexTuningConfig(org.apache.druid.indexing.common.task.IndexTask.IndexTuningConfig) IndexTaskUtils(org.apache.druid.indexing.common.task.IndexTaskUtils) Granularity(org.apache.druid.java.util.common.granularity.Granularity) GET(javax.ws.rs.GET) Tasks(org.apache.druid.indexing.common.task.Tasks) TaskStatus(org.apache.druid.indexer.TaskStatus) ArrayList(java.util.ArrayList) IndexTask(org.apache.druid.indexing.common.task.IndexTask) Interval(org.joda.time.Interval) HttpServletRequest(javax.servlet.http.HttpServletRequest) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) StringSketchMerger(org.apache.druid.indexing.common.task.batch.parallel.distribution.StringSketchMerger) PartitionsSpec(org.apache.druid.indexer.partitions.PartitionsSpec) Nullable(javax.annotation.Nullable) BuildingShardSpec(org.apache.druid.timeline.partition.BuildingShardSpec) GranularitySpec(org.apache.druid.segment.indexing.granularity.GranularitySpec) Throwables(com.google.common.base.Throwables) StringDistributionMerger(org.apache.druid.indexing.common.task.batch.parallel.distribution.StringDistributionMerger) IOException(java.io.IOException) TreeMap(java.util.TreeMap) ChatHandlers(org.apache.druid.segment.realtime.firehose.ChatHandlers) Preconditions(com.google.common.base.Preconditions) ArbitraryGranularitySpec(org.apache.druid.segment.indexing.granularity.ArbitraryGranularitySpec) SubTaskSpecStatus(org.apache.druid.indexing.common.task.batch.parallel.ParallelIndexTaskRunner.SubTaskSpecStatus) HllSketch(org.apache.datasketches.hll.HllSketch) AuthorizerMapper(org.apache.druid.server.security.AuthorizerMapper) Path(javax.ws.rs.Path) Memory(org.apache.datasketches.memory.Memory) TaskResource(org.apache.druid.indexing.common.task.TaskResource) MonotonicNonNull(org.checkerframework.checker.nullness.qual.MonotonicNonNull) ChatHandler(org.apache.druid.segment.realtime.firehose.ChatHandler) QueryParam(javax.ws.rs.QueryParam) Consumes(javax.ws.rs.Consumes) Union(org.apache.datasketches.hll.Union) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Task(org.apache.druid.indexing.common.task.Task) SmileMediaTypes(com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes) Context(javax.ws.rs.core.Context) ImmutableMap(com.google.common.collect.ImmutableMap) 
NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) Collection(java.util.Collection) StringUtils(org.apache.druid.java.util.common.StringUtils) HashedPartitionsSpec(org.apache.druid.indexer.partitions.HashedPartitionsSpec) Action(org.apache.druid.server.security.Action) Collectors(java.util.stream.Collectors) MaxAllowedLocksExceededException(org.apache.druid.indexing.common.task.batch.MaxAllowedLocksExceededException) Objects(java.util.Objects) IndexIngestionSpec(org.apache.druid.indexing.common.task.IndexTask.IndexIngestionSpec) List(java.util.List) Response(javax.ws.rs.core.Response) DataSegment(org.apache.druid.timeline.DataSegment) Entry(java.util.Map.Entry) CurrentSubTaskHolder(org.apache.druid.indexing.common.task.CurrentSubTaskHolder) Logger(org.apache.druid.java.util.common.logger.Logger) PathParam(javax.ws.rs.PathParam) CollectionUtils(org.apache.druid.utils.CollectionUtils) HashMap(java.util.HashMap) Multimap(com.google.common.collect.Multimap) RowIngestionMeters(org.apache.druid.segment.incremental.RowIngestionMeters) Function(java.util.function.Function) TuningConfig(org.apache.druid.segment.indexing.TuningConfig) HashSet(java.util.HashSet) InputSource(org.apache.druid.data.input.InputSource) RowIngestionMetersTotals(org.apache.druid.segment.incremental.RowIngestionMetersTotals) Status(javax.ws.rs.core.Response.Status) DimensionRangePartitionsSpec(org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec) ParseExceptionReport(org.apache.druid.segment.incremental.ParseExceptionReport) POST(javax.ws.rs.POST) TransactionalSegmentPublisher(org.apache.druid.segment.realtime.appenderator.TransactionalSegmentPublisher) DateTime(org.joda.time.DateTime) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) IngestionStatsAndErrorsTaskReport(org.apache.druid.indexing.common.IngestionStatsAndErrorsTaskReport) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) IntermediaryDataManager(org.apache.druid.indexing.worker.shuffle.IntermediaryDataManager) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Collections(java.util.Collections)

Example 2 with TaskToolbox

Use of org.apache.druid.indexing.common.TaskToolbox in project druid by druid-io.

The class PartialSegmentMergeTask, method runTask.

@Override
public TaskStatus runTask(TaskToolbox toolbox) throws Exception {
    // Group partitionLocations by interval and partitionId
    final Map<Interval, Int2ObjectMap<List<PartitionLocation>>> intervalToBuckets = new HashMap<>();
    for (PartitionLocation location : ioConfig.getPartitionLocations()) {
        intervalToBuckets.computeIfAbsent(location.getInterval(), k -> new Int2ObjectOpenHashMap<>()).computeIfAbsent(location.getBucketId(), k -> new ArrayList<>()).add(location);
    }
    final List<TaskLock> locks = toolbox.getTaskActionClient().submit(new SurrogateAction<>(supervisorTaskId, new LockListAction()));
    final Map<Interval, String> intervalToVersion = Maps.newHashMapWithExpectedSize(locks.size());
    locks.forEach(lock -> {
        if (lock.isRevoked()) {
            throw new ISE("Lock[%s] is revoked", lock);
        }
        final String mustBeNull = intervalToVersion.put(lock.getInterval(), lock.getVersion());
        if (mustBeNull != null) {
            throw new ISE("Unexpected state: Two versions([%s], [%s]) for the same interval[%s]", lock.getVersion(), mustBeNull, lock.getInterval());
        }
    });
    final Stopwatch fetchStopwatch = Stopwatch.createStarted();
    final Map<Interval, Int2ObjectMap<List<File>>> intervalToUnzippedFiles = fetchSegmentFiles(toolbox, intervalToBuckets);
    final long fetchTime = fetchStopwatch.elapsed(TimeUnit.SECONDS);
    fetchStopwatch.stop();
    LOG.info("Fetch took [%s] seconds", fetchTime);
    final ParallelIndexSupervisorTaskClient taskClient = toolbox.getSupervisorTaskClientFactory().build(
        new ClientBasedTaskInfoProvider(toolbox.getIndexingServiceClient()),
        getId(),
        // always use a single HTTP thread
        1,
        getTuningConfig().getChatHandlerTimeout(),
        getTuningConfig().getChatHandlerNumRetries()
    );
    final File persistDir = toolbox.getPersistDir();
    org.apache.commons.io.FileUtils.deleteQuietly(persistDir);
    FileUtils.mkdirp(persistDir);
    final Set<DataSegment> pushedSegments = mergeAndPushSegments(toolbox, getDataSchema(), getTuningConfig(), persistDir, intervalToVersion, intervalToUnzippedFiles);
    taskClient.report(supervisorTaskId, new PushedSegmentsReport(getId(), Collections.emptySet(), pushedSegments, ImmutableMap.of()));
    return TaskStatus.success(getId());
}
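The first loop above builds a two-level index with nested computeIfAbsent calls, so each (interval, bucketId) pair lazily gets its own list on first use. Here is a self-contained sketch of the same grouping pattern; a simplified Location record stands in for PartitionLocation, and plain HashMaps replace the fastutil Int2ObjectOpenHashMap.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hedged sketch: group locations by interval, then by bucket id, as in runTask above.
public class GroupingSketch {
    // Stand-in for PartitionLocation; interval is a plain String here.
    record Location(String interval, int bucketId, String path) {}

    public static void main(String[] args) {
        List<Location> locations = List.of(
            new Location("2021-01-01/2021-01-02", 0, "a"),
            new Location("2021-01-01/2021-01-02", 0, "b"),
            new Location("2021-01-01/2021-01-02", 1, "c")
        );
        Map<String, Map<Integer, List<Location>>> intervalToBuckets = new HashMap<>();
        for (Location loc : locations) {
            // Create the inner map and list only when a key is first seen.
            intervalToBuckets
                .computeIfAbsent(loc.interval(), k -> new HashMap<>())
                .computeIfAbsent(loc.bucketId(), k -> new ArrayList<>())
                .add(loc);
        }
        System.out.println(intervalToBuckets);
    }
}

The nested map mirrors intervalToBuckets above: the outer key is the interval, the inner key the bucket id, and the leaves collect every location for that bucket.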
Also used : TaskToolbox(org.apache.druid.indexing.common.TaskToolbox) LockListAction(org.apache.druid.indexing.common.actions.LockListAction) Logger(org.apache.druid.java.util.common.logger.Logger) JsonProperty(com.fasterxml.jackson.annotation.JsonProperty) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) Arrays(java.util.Arrays) Stopwatch(com.google.common.base.Stopwatch) HashMap(java.util.HashMap) TaskResource(org.apache.druid.indexing.common.task.TaskResource) TaskStatus(org.apache.druid.indexer.TaskStatus) Pair(org.apache.druid.java.util.common.Pair) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) BaseProgressIndicator(org.apache.druid.segment.BaseProgressIndicator) Interval(org.joda.time.Interval) TaskActionClient(org.apache.druid.indexing.common.actions.TaskActionClient) Map(java.util.Map) TaskLock(org.apache.druid.indexing.common.TaskLock) RetryUtils(org.apache.druid.java.util.common.RetryUtils) IndexMergerV9(org.apache.druid.segment.IndexMergerV9) FileUtils(org.apache.druid.java.util.common.FileUtils) Nullable(javax.annotation.Nullable) ShardSpec(org.apache.druid.timeline.partition.ShardSpec) Int2ObjectOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) ClientBasedTaskInfoProvider(org.apache.druid.indexing.common.task.ClientBasedTaskInfoProvider) IndexMerger(org.apache.druid.segment.IndexMerger) Closer(org.apache.druid.java.util.common.io.Closer) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) QueryableIndex(org.apache.druid.segment.QueryableIndex) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) IOException(java.io.IOException) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Int2ObjectMap(it.unimi.dsi.fastutil.ints.Int2ObjectMap) SurrogateAction(org.apache.druid.indexing.common.actions.SurrogateAction) DataSegment(org.apache.druid.timeline.DataSegment) Entry(java.util.Map.Entry) Preconditions(com.google.common.base.Preconditions) IndexIO(org.apache.druid.segment.IndexIO) DataSchema(org.apache.druid.segment.indexing.DataSchema) Collections(java.util.Collections)

Example 3 with TaskToolbox

Use of org.apache.druid.indexing.common.TaskToolbox in project druid by druid-io.

The class SingleTaskBackgroundRunner, method run.

@Override
public ListenableFuture<TaskStatus> run(final Task task) {
    if (runningItem == null) {
        final TaskToolbox toolbox = toolboxFactory.build(task);
        final Object taskPriorityObj = task.getContextValue(TaskThreadPriority.CONTEXT_KEY);
        int taskPriority = 0;
        try {
            taskPriority = taskPriorityObj == null ? 0 : Numbers.parseInt(taskPriorityObj);
        } catch (NumberFormatException e) {
            log.error(e, "Error parsing task priority [%s] for task [%s]", taskPriorityObj, task.getId());
        }
        // Ensure an executor for that priority exists
        executorService = buildExecutorService(taskPriority);
        final ListenableFuture<TaskStatus> statusFuture = executorService.submit(new SingleTaskBackgroundRunnerCallable(task, location, toolbox));
        runningItem = new SingleTaskBackgroundRunnerWorkItem(task, location, statusFuture);
        return statusFuture;
    } else {
        throw new ISE("Already running task[%s]", runningItem.getTask().getId());
    }
}
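Note how the task priority is read defensively above: a missing context value falls back to 0, and an unparseable one is logged and also falls back to 0 rather than failing the task. A self-contained sketch of that fallback follows; Integer.parseInt stands in for Druid's Numbers.parseInt, and the logging is elided.

// Hedged sketch: default a priority when the context value is absent or unparseable.
public class PrioritySketch {
    static int parsePriority(Object taskPriorityObj) {
        if (taskPriorityObj == null) {
            return 0;
        }
        try {
            return Integer.parseInt(String.valueOf(taskPriorityObj));
        } catch (NumberFormatException e) {
            // Log-and-default rather than failing the task over a bad context value.
            return 0;
        }
    }

    public static void main(String[] args) {
        System.out.println(parsePriority(null));   // 0
        System.out.println(parsePriority("25"));   // 25
        System.out.println(parsePriority("oops")); // 0
    }
}

Defaulting instead of throwing keeps a malformed context value from killing a task that is otherwise fine, matching the log-and-continue behavior in run() above.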
Also used : TaskToolbox(org.apache.druid.indexing.common.TaskToolbox) ISE(org.apache.druid.java.util.common.ISE) TaskStatus(org.apache.druid.indexer.TaskStatus)

Example 4 with TaskToolbox

Use of org.apache.druid.indexing.common.TaskToolbox in project druid by druid-io.

The class AppenderatorDriverRealtimeIndexTaskTest, method runTask.

private ListenableFuture<TaskStatus> runTask(final Task task) {
    try {
        taskStorage.insert(task, TaskStatus.running(task.getId()));
    } catch (EntryExistsException e) {
    // suppress
    }
    taskLockbox.syncFromStorage();
    final TaskToolbox toolbox = taskToolboxFactory.build(task);
    return taskExec.submit(() -> {
        try {
            if (task.isReady(toolbox.getTaskActionClient())) {
                return task.run(toolbox);
            } else {
                throw new ISE("Task is not ready");
            }
        } catch (Exception e) {
            log.warn(e, "Task failed");
            throw e;
        }
    });
}
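The helper submits the task body as a callable that first consults task.isReady(...) and refuses to run otherwise, so readiness failures surface through the returned future. Below is a self-contained sketch of the same gate with stand-in types: a plain ExecutorService and Future replace Guava's listening variants, and the Task interface here is invented for illustration.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Hedged sketch: check a readiness gate before running submitted work.
public class ReadinessSketch {
    interface Task {
        boolean isReady() throws Exception;
        String run() throws Exception;
    }

    static Future<String> submit(ExecutorService exec, Task task) {
        return exec.submit((Callable<String>) () -> {
            if (!task.isReady()) {
                // Match the example: refuse to run rather than block until ready.
                throw new IllegalStateException("Task is not ready");
            }
            return task.run();
        });
    }

    public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newSingleThreadExecutor();
        Future<String> f = submit(exec, new Task() {
            public boolean isReady() { return true; }
            public String run() { return "SUCCESS"; }
        });
        System.out.println(f.get());
        exec.shutdown();
    }
}

Throwing inside the callable means the exception surfaces when the caller resolves the future, which is exactly how the test helper's failures propagate.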
Also used : TaskToolbox(org.apache.druid.indexing.common.TaskToolbox) EntryExistsException(org.apache.druid.metadata.EntryExistsException) ISE(org.apache.druid.java.util.common.ISE) ParseException(org.apache.druid.java.util.common.parsers.ParseException) IOException(java.io.IOException) ExpectedException(org.junit.rules.ExpectedException)

Example 5 with TaskToolbox

Use of org.apache.druid.indexing.common.TaskToolbox in project druid by druid-io.

The class IndexTaskTest, method testWaitForSegmentAvailabilityMultipleSegmentsSuccess.

@Test
public void testWaitForSegmentAvailabilityMultipleSegmentsSuccess() throws IOException {
    final File tmpDir = temporaryFolder.newFolder();
    TaskToolbox mockToolbox = EasyMock.createMock(TaskToolbox.class);
    DataSegment mockDataSegment1 = EasyMock.createMock(DataSegment.class);
    DataSegment mockDataSegment2 = EasyMock.createMock(DataSegment.class);
    List<DataSegment> segmentsToWaitFor = new ArrayList<>();
    segmentsToWaitFor.add(mockDataSegment1);
    segmentsToWaitFor.add(mockDataSegment2);
    IndexTask indexTask = new IndexTask(null, null, createDefaultIngestionSpec(jsonMapper, tmpDir, new UniformGranularitySpec(Granularities.HOUR, Granularities.MINUTE, null), null, createTuningConfigWithMaxRowsPerSegment(2, true), false, false), null);
    EasyMock.expect(mockDataSegment1.getInterval()).andReturn(Intervals.of("1970-01-01/1971-01-01")).once();
    EasyMock.expect(mockDataSegment1.getVersion()).andReturn("dummyString").once();
    EasyMock.expect(mockDataSegment1.getShardSpec()).andReturn(EasyMock.createMock(ShardSpec.class)).once();
    EasyMock.expect(mockDataSegment1.getId()).andReturn(SegmentId.dummy("MockDataSource")).once();
    EasyMock.expect(mockDataSegment2.getInterval()).andReturn(Intervals.of("1971-01-01/1972-01-01")).once();
    EasyMock.expect(mockDataSegment2.getVersion()).andReturn("dummyString").once();
    EasyMock.expect(mockDataSegment2.getShardSpec()).andReturn(EasyMock.createMock(ShardSpec.class)).once();
    EasyMock.expect(mockDataSegment2.getId()).andReturn(SegmentId.dummy("MockDataSource")).once();
    EasyMock.expect(mockToolbox.getSegmentHandoffNotifierFactory()).andReturn(new NoopSegmentHandoffNotifierFactory()).once();
    EasyMock.expect(mockToolbox.getEmitter()).andReturn(new NoopServiceEmitter()).anyTimes();
    EasyMock.expect(mockDataSegment1.getDataSource()).andReturn("MockDataSource").once();
    EasyMock.replay(mockToolbox);
    EasyMock.replay(mockDataSegment1, mockDataSegment2);
    Assert.assertTrue(indexTask.waitForSegmentAvailability(mockToolbox, segmentsToWaitFor, 30000));
    EasyMock.verify(mockToolbox);
    EasyMock.verify(mockDataSegment1, mockDataSegment2);
}
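The test follows EasyMock's record-replay-verify lifecycle: expectations such as .once() and .anyTimes() are declared first, replay(...) arms the mocks, and verify(...) fails the test if an expected call never happened. A minimal self-contained sketch of that lifecycle; the Clock interface is a made-up stand-in, and only the EasyMock calls mirror the test above.

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

// Hedged sketch: EasyMock's record, replay, verify phases on a toy interface.
public class EasyMockLifecycleSketch {
    interface Clock {
        long now();
    }

    public static void main(String[] args) {
        Clock clock = createMock(Clock.class);
        // Record phase: declare expected calls and their answers.
        expect(clock.now()).andReturn(42L).once();
        // Replay phase: switch the mock from recording to serving answers.
        replay(clock);
        System.out.println(clock.now()); // 42
        // Verify phase: fail if an expected call was not made exactly as declared.
        verify(clock);
    }
}

once() caps a call at a single invocation while anyTimes() leaves it unbounded, which is why getEmitter() above may be called repeatedly but each segment getter only once.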
Also used : TaskToolbox(org.apache.druid.indexing.common.TaskToolbox) UniformGranularitySpec(org.apache.druid.segment.indexing.granularity.UniformGranularitySpec) ArrayList(java.util.ArrayList) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) NoopSegmentHandoffNotifierFactory(org.apache.druid.segment.realtime.plumber.NoopSegmentHandoffNotifierFactory) File(java.io.File) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Aggregations

TaskToolbox (org.apache.druid.indexing.common.TaskToolbox): 51
TaskStatus (org.apache.druid.indexer.TaskStatus): 34
Test (org.junit.Test): 31
DataSegment (org.apache.druid.timeline.DataSegment): 23
TaskActionClient (org.apache.druid.indexing.common.actions.TaskActionClient): 20
ISE (org.apache.druid.java.util.common.ISE): 18
File (java.io.File): 16
ArrayList (java.util.ArrayList): 14
Map (java.util.Map): 14
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 13
List (java.util.List): 12
IOException (java.io.IOException): 11
Preconditions (com.google.common.base.Preconditions): 10
Collections (java.util.Collections): 10
Pair (org.apache.druid.java.util.common.Pair): 10
Interval (org.joda.time.Interval): 10
ImmutableMap (com.google.common.collect.ImmutableMap): 9
HashMap (java.util.HashMap): 9
Set (java.util.Set): 9
Nullable (javax.annotation.Nullable): 9