
Example 1 with OrderedSequenceNumber

Use of org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber in project druid by druid-io.

From the class SeekableStreamSupervisor, method createNewTasks:

private void createNewTasks() throws JsonProcessingException {
    // Update the checkpoints in the task groups to the latest ones so that new tasks do not re-read data that has already been published.
    verifyAndMergeCheckpoints(
        activelyReadingTaskGroups.values().stream()
            .filter(taskGroup -> taskGroup.tasks.size() < ioConfig.getReplicas())
            .collect(Collectors.toList())
    );
    // check that there is a current task group for each group of partitions in [partitionGroups]
    for (Integer groupId : partitionGroups.keySet()) {
        if (!activelyReadingTaskGroups.containsKey(groupId)) {
            log.info("Creating new task group [%d] for partitions %s", groupId, partitionGroups.get(groupId));
            Optional<DateTime> minimumMessageTime;
            if (ioConfig.getLateMessageRejectionStartDateTime().isPresent()) {
                minimumMessageTime = Optional.of(ioConfig.getLateMessageRejectionStartDateTime().get());
            } else {
                minimumMessageTime = ioConfig.getLateMessageRejectionPeriod().isPresent()
                    ? Optional.of(DateTimes.nowUtc().minus(ioConfig.getLateMessageRejectionPeriod().get()))
                    : Optional.absent();
            }
            Optional<DateTime> maximumMessageTime = ioConfig.getEarlyMessageRejectionPeriod().isPresent()
                ? Optional.of(DateTimes.nowUtc().plus(ioConfig.getTaskDuration()).plus(ioConfig.getEarlyMessageRejectionPeriod().get()))
                : Optional.absent();
            final Map<PartitionIdType, OrderedSequenceNumber<SequenceOffsetType>> unfilteredStartingOffsets =
                generateStartingSequencesForPartitionGroup(groupId);
            final Map<PartitionIdType, OrderedSequenceNumber<SequenceOffsetType>> startingOffsets;
            if (supportsPartitionExpiration()) {
                startingOffsets = filterExpiredPartitionsFromStartingOffsets(unfilteredStartingOffsets);
            } else {
                startingOffsets = unfilteredStartingOffsets;
            }
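            // Unwrap each OrderedSequenceNumber into its raw offset, dropping partitions
            // whose sequence number is null.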
            ImmutableMap<PartitionIdType, SequenceOffsetType> simpleStartingOffsets = startingOffsets
                .entrySet()
                .stream()
                .filter(entry -> entry.getValue().get() != null)
                .collect(Collectors.collectingAndThen(
                    Collectors.toMap(Entry::getKey, entry -> entry.getValue().get()),
                    ImmutableMap::copyOf
                ));
            ImmutableMap<PartitionIdType, SequenceOffsetType> simpleUnfilteredStartingOffsets;
            if (supportsPartitionExpiration()) {
                simpleUnfilteredStartingOffsets = unfilteredStartingOffsets
                    .entrySet()
                    .stream()
                    .filter(entry -> entry.getValue().get() != null)
                    .collect(Collectors.collectingAndThen(
                        Collectors.toMap(Entry::getKey, entry -> entry.getValue().get()),
                        ImmutableMap::copyOf
                    ));
            } else {
                simpleUnfilteredStartingOffsets = simpleStartingOffsets;
            }
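            // Record the partitions whose starting sequence is exclusive, i.e. the record
            // at the stored offset has already been read and must be skipped.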
            Set<PartitionIdType> exclusiveStartSequenceNumberPartitions;
            if (!useExclusiveStartingSequence) {
                exclusiveStartSequenceNumberPartitions = Collections.emptySet();
            } else {
                exclusiveStartSequenceNumberPartitions = startingOffsets
                    .entrySet()
                    .stream()
                    .filter(x -> x.getValue().get() != null && x.getValue().isExclusive())
                    .map(Entry::getKey)
                    .collect(Collectors.toSet());
            }
            activelyReadingTaskGroups.put(
                groupId,
                new TaskGroup(
                    groupId,
                    simpleStartingOffsets,
                    simpleUnfilteredStartingOffsets,
                    minimumMessageTime,
                    maximumMessageTime,
                    exclusiveStartSequenceNumberPartitions
                )
            );
        }
    }
    // iterate through all the current task groups and make sure each one has the desired number of replica tasks
    boolean createdTask = false;
    for (Entry<Integer, TaskGroup> entry : activelyReadingTaskGroups.entrySet()) {
        TaskGroup taskGroup = entry.getValue();
        Integer groupId = entry.getKey();
        if (taskGroup.startingSequences == null
            || taskGroup.startingSequences.size() == 0
            || taskGroup.startingSequences.values().stream().allMatch(x -> x == null || isEndOfShard(x))) {
            log.debug("Nothing to read in any partition for taskGroup [%d], skipping task creation", groupId);
            continue;
        }
        if (ioConfig.getReplicas() > taskGroup.tasks.size()) {
            log.info("Number of tasks [%d] does not match configured numReplicas [%d] in task group [%d], creating more tasks", taskGroup.tasks.size(), ioConfig.getReplicas(), groupId);
            createTasksForGroup(groupId, ioConfig.getReplicas() - taskGroup.tasks.size());
            createdTask = true;
        }
    }
    if (createdTask && firstRunTime.isBeforeNow()) {
        // Schedule a run event after a short delay to update our internal data structures with the new tasks that were
        // just created. This is mainly for the benefit of the status API in situations where the run period is lengthy.
        scheduledExec.schedule(buildRunTask(), 5000, TimeUnit.MILLISECONDS);
    }
}
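The unwrap-and-filter collects above are the heart of this method's use of OrderedSequenceNumber: each wrapper is reduced to its raw offset, null offsets are dropped, and partitions whose start is exclusive are collected separately. The following self-contained sketch reproduces that pattern with a hypothetical SimpleSequenceNumber stand-in for a concrete OrderedSequenceNumber<Long> subclass; the class and all names here are illustrative, not the actual Druid API.

import com.google.common.collect.ImmutableMap;

import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;

public class StartingOffsetsSketch {

    // Hypothetical stand-in for a concrete OrderedSequenceNumber<Long> subclass (e.g. a
    // Kafka-style offset); only the pieces the filtering logic touches are modeled here.
    static class SimpleSequenceNumber implements Comparable<SimpleSequenceNumber> {
        private final Long offset;
        private final boolean exclusive;

        SimpleSequenceNumber(Long offset, boolean exclusive) {
            this.offset = offset;
            this.exclusive = exclusive;
        }

        Long get() { return offset; }

        boolean isExclusive() { return exclusive; }

        @Override
        public int compareTo(SimpleSequenceNumber o) { return offset.compareTo(o.offset); }
    }

    public static void main(String[] args) {
        Map<String, SimpleSequenceNumber> startingOffsets = Map.of(
            "partition-0", new SimpleSequenceNumber(100L, false),
            "partition-1", new SimpleSequenceNumber(250L, true)
        );

        // Unwrap each wrapper into its raw offset, dropping null offsets, mirroring the
        // simpleStartingOffsets collect in createNewTasks above.
        ImmutableMap<String, Long> simpleStartingOffsets = startingOffsets.entrySet().stream()
            .filter(entry -> entry.getValue().get() != null)
            .collect(Collectors.collectingAndThen(
                Collectors.toMap(Entry::getKey, entry -> entry.getValue().get()),
                ImmutableMap::copyOf
            ));

        // Partitions whose starting sequence is exclusive, mirroring
        // exclusiveStartSequenceNumberPartitions above.
        Set<String> exclusiveStartPartitions = startingOffsets.entrySet().stream()
            .filter(e -> e.getValue().get() != null && e.getValue().isExclusive())
            .map(Entry::getKey)
            .collect(Collectors.toSet());

        System.out.println(simpleStartingOffsets);    // both partitions with raw offsets
        System.out.println(exclusiveStartPartitions); // [partition-1]
    }
}

Run as-is, the sketch prints the two raw offsets and an exclusive set containing only partition-1, the partition whose stored offset has already been consumed.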
Also used : SeekableStreamIndexTask(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask) Pair(org.apache.druid.java.util.common.Pair) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) TaskQueue(org.apache.druid.indexing.overlord.TaskQueue) Optional(com.google.common.base.Optional) TaskRunner(org.apache.druid.indexing.overlord.TaskRunner) Duration(java.time.Duration) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) Execs(org.apache.druid.java.util.common.concurrent.Execs) SeekableStreamDataSourceMetadata(org.apache.druid.indexing.seekablestream.SeekableStreamDataSourceMetadata) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) NotNull(javax.validation.constraints.NotNull) Int2ObjectLinkedOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectLinkedOpenHashMap) LagStats(org.apache.druid.indexing.overlord.supervisor.autoscaler.LagStats) TaskState(org.apache.druid.indexer.TaskState) Stream(java.util.stream.Stream) Predicate(com.google.common.base.Predicate) RowIngestionMetersFactory(org.apache.druid.segment.incremental.RowIngestionMetersFactory) TaskMaster(org.apache.druid.indexing.overlord.TaskMaster) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) Joiner(com.google.common.base.Joiner) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Iterables(com.google.common.collect.Iterables) SupervisorStateManager(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManager) Callable(java.util.concurrent.Callable) TaskStatus(org.apache.druid.indexer.TaskStatus) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) EntryExistsException(org.apache.druid.metadata.EntryExistsException) SeekableStreamIndexTaskIOConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskIOConfig) StringComparators(org.apache.druid.query.ordering.StringComparators) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) BiConsumer(java.util.function.BiConsumer) SupervisorManager(org.apache.druid.indexing.overlord.supervisor.SupervisorManager) AutoScalerConfig(org.apache.druid.indexing.seekablestream.supervisor.autoscaler.AutoScalerConfig) RetryUtils(org.apache.druid.java.util.common.RetryUtils) SeekableStreamIndexTaskClientFactory(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClientFactory) Nullable(javax.annotation.Nullable) SeekableStreamIndexTaskTuningConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskTuningConfig) SeekableStreamIndexTaskClient(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClient) BlockingDeque(java.util.concurrent.BlockingDeque) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) TaskLocation(org.apache.druid.indexer.TaskLocation) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Futures(com.google.common.util.concurrent.Futures) TaskInfoProvider(org.apache.druid.indexing.common.TaskInfoProvider) TreeMap(java.util.TreeMap) Int2ObjectMap(it.unimi.dsi.fastutil.ints.Int2ObjectMap) LinkedBlockingDeque(java.util.concurrent.LinkedBlockingDeque) SupervisorReport(org.apache.druid.indexing.overlord.supervisor.SupervisorReport) Preconditions(com.google.common.base.Preconditions) DataSchema(org.apache.druid.segment.indexing.DataSchema) SeekableStreamSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamSequenceNumbers) 
StreamPartition(org.apache.druid.indexing.seekablestream.common.StreamPartition) TimeoutException(java.util.concurrent.TimeoutException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Task(org.apache.druid.indexing.common.task.Task) SeekableStreamStartSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamStartSequenceNumbers) DateTimes(org.apache.druid.java.util.common.DateTimes) Function(com.google.common.base.Function) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) OrderedSequenceNumber(org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber) StreamException(org.apache.druid.indexing.seekablestream.common.StreamException) List(java.util.List) MetadataSupervisorManager(org.apache.druid.metadata.MetadataSupervisorManager) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) Entry(java.util.Map.Entry) ByteEntity(org.apache.druid.data.input.impl.ByteEntity) SortedMap(java.util.SortedMap) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) RecordSupplier(org.apache.druid.indexing.seekablestream.common.RecordSupplier) HashMap(java.util.HashMap) HashSet(java.util.HashSet) MapperFeature(com.fasterxml.jackson.databind.MapperFeature) ImmutableList(com.google.common.collect.ImmutableList) IndexTaskClient(org.apache.druid.indexing.common.IndexTaskClient) TaskRunnerListener(org.apache.druid.indexing.overlord.TaskRunnerListener) ExecutorService(java.util.concurrent.ExecutorService) ParseExceptionReport(org.apache.druid.segment.incremental.ParseExceptionReport) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) TimeUnit(java.util.concurrent.TimeUnit) TaskRunnerWorkItem(org.apache.druid.indexing.overlord.TaskRunnerWorkItem) VisibleForTesting(com.google.common.annotations.VisibleForTesting) DigestUtils(org.apache.commons.codec.digest.DigestUtils) Supervisor(org.apache.druid.indexing.overlord.supervisor.Supervisor) Comparator(java.util.Comparator) Collections(java.util.Collections) SeekableStreamIndexTaskRunner(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner)

Example 2 with OrderedSequenceNumber

Use of org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber in project druid by druid-io.

From the class SeekableStreamSupervisor, method verifyAndMergeCheckpoints:

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills
 * the inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup corresponding to segments that have already been published, so that
 * any newly created tasks for the taskGroup start indexing from after the latest published sequences.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();
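    // Ask every task in the group for its checkpoints in parallel; results are
    // matched back to task ids by index.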
    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>> checkpointsFuture = taskClient.getCheckpointsAsync(taskId, true);
        futures.add(checkpointsFuture);
        taskIds.add(taskId);
    }
    try {
        List<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>> futuresResult =
            Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    stateManager.recordThrowableEvent(e);
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId, "Exception[%s] while getting checkpoints", e.getClass());
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    final DataSourceMetadata rawDataSourceMetadata = indexerMetadataStorageCoordinator.retrieveDataSourceMetadata(dataSource);
    if (rawDataSourceMetadata != null && !checkSourceMetadataMatch(rawDataSourceMetadata)) {
        throw new IAE("Datasource metadata instance does not match required, found instance of [%s]", rawDataSourceMetadata.getClass());
    }
    @SuppressWarnings("unchecked")
    final SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> latestDataSourceMetadata =
        (SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType>) rawDataSourceMetadata;
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
        && latestDataSourceMetadata.getSeekableStreamSequenceNumbers() != null
        && ioConfig.getStream().equals(latestDataSourceMetadata.getSeekableStreamSequenceNumbers().getStream());
    final Map<PartitionIdType, SequenceOffsetType> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap();
    } else {
        latestOffsetsFromDb = null;
    }
    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));
    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;
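    // The first pass establishes the earliest sequence id consistent with the metadata store;
    // every subsequent task is then checked against the taskGroup's checkpoints and killed if inconsistent.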
    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find the first checkpoint sequence whose partition offsets are all consistent
            // with the latest offsets persisted in the metadata store, and store its id
            if (taskCheckpoints.entrySet().stream().anyMatch(
                sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream().allMatch(partitionOffset -> {
                    OrderedSequenceNumber<SequenceOffsetType> sequence = makeSequenceNumber(partitionOffset.getValue());
                    OrderedSequenceNumber<SequenceOffsetType> latestOffset = makeSequenceNumber(
                        latestOffsetsFromDb == null
                            ? partitionOffset.getValue()
                            : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(), partitionOffset.getValue())
                    );
                    return sequence.compareTo(latestOffset) == 0;
                }) && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey())
            ) || (pendingCompletionTaskGroups.getOrDefault(groupId, new CopyOnWriteArrayList<>()).size() > 0
                  && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<PartitionIdType, SequenceOffsetType>> latestCheckpoints =
                    new TreeMap<>(taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.checkpointSequences.clear();
                taskGroup.checkpointSequences.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId, taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.checkpointSequences.firstKey()) == null
                || !taskCheckpoints.get(taskGroup.checkpointSequences.firstKey())
                    .equals(taskGroup.checkpointSequences.firstEntry().getValue())
                || taskCheckpoints.tailMap(taskGroup.checkpointSequences.firstKey()).size()
                    != taskGroup.checkpointSequences.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId, taskCheckpoints, taskGroup.checkpointSequences);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }
    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size())
        || (taskGroup.tasks.size() == 0
            && pendingCompletionTaskGroups.getOrDefault(groupId, new CopyOnWriteArrayList<>()).size() == 0)) {
        // Are we killing all tasks, or is no task left in the group?
        // Clear state about the taskGroup so that the latest sequence information is fetched from the metadata store.
        log.warn("Clearing task group [%d] information as no valid tasks are left in the group", groupId);
        activelyReadingTaskGroups.remove(groupId);
        for (PartitionIdType partitionId : taskGroup.startingSequences.keySet()) {
            partitionOffsets.put(partitionId, getNotSetMarker());
        }
    }
    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs)).forEach(sequenceCheckpoint -> {
        killTask(
            sequenceCheckpoint.lhs,
            "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints [%s] or latest "
            + "persisted sequences in metadata store [%s]",
            sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.checkpointSequences, latestOffsetsFromDb
        );
        taskGroup.tasks.remove(sequenceCheckpoint.lhs);
    });
}
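The allMatch at the center of the first pass is easy to miss inside the large condition: a task's checkpoint is considered consistent when every partition's sequence number compares equal to the latest offset persisted in the metadata store. Below is a reduced sketch of that test, assuming plain Long offsets in place of OrderedSequenceNumber; isConsistent is an illustrative helper, not part of the Druid codebase.

import java.util.Map;

public class CheckpointConsistencySketch {

    // Mirrors the allMatch in verifyAndMergeCheckpoints: a checkpoint is consistent when,
    // for every partition, the task's offset compares equal to the latest offset persisted
    // in the metadata store, falling back to the task's own offset when the store has no
    // entry for that partition (or no metadata at all).
    static boolean isConsistent(Map<String, Long> taskCheckpoint, Map<String, Long> latestOffsetsFromDb) {
        return taskCheckpoint.entrySet().stream().allMatch(partitionOffset -> {
            Long sequence = partitionOffset.getValue();
            Long latest = latestOffsetsFromDb == null
                ? partitionOffset.getValue()
                : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(), partitionOffset.getValue());
            return sequence.compareTo(latest) == 0;
        });
    }

    public static void main(String[] args) {
        Map<String, Long> checkpoint = Map.of("p0", 100L, "p1", 250L);

        System.out.println(isConsistent(checkpoint, Map.of("p0", 100L, "p1", 250L))); // true
        System.out.println(isConsistent(checkpoint, Map.of("p0", 100L, "p1", 300L))); // false: p1 disagrees
        System.out.println(isConsistent(checkpoint, null));                           // true: nothing persisted yet
    }
}

The fallback to the task's own offset when the store has no entry for a partition, or no metadata row at all, is what lets a brand-new datasource pass the consistency check.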
