
Example 1 with StreamException

Use of org.apache.druid.indexing.seekablestream.common.StreamException in project druid by druid-io.

From the class SeekableStreamSupervisor, method updatePartitionDataFromStream:

private boolean updatePartitionDataFromStream() {
    List<PartitionIdType> previousPartitionIds = new ArrayList<>(partitionIds);
    Set<PartitionIdType> partitionIdsFromSupplier;
    recordSupplierLock.lock();
    try {
        partitionIdsFromSupplier = recordSupplier.getPartitionIds(ioConfig.getStream());
        if (shouldSkipIgnorablePartitions()) {
            partitionIdsFromSupplier.removeAll(computeIgnorablePartitionIds());
        }
    } catch (Exception e) {
        stateManager.recordThrowableEvent(e);
        log.warn("Could not fetch partitions for topic/stream [%s]: %s", ioConfig.getStream(), e.getMessage());
        log.debug(e, "full stack trace");
        return false;
    } finally {
        recordSupplierLock.unlock();
    }
    if (partitionIdsFromSupplier == null || partitionIdsFromSupplier.size() == 0) {
        String errMsg = StringUtils.format("No partitions found for stream [%s]", ioConfig.getStream());
        stateManager.recordThrowableEvent(new StreamException(new ISE(errMsg)));
        log.warn(errMsg);
        return false;
    }
    log.debug("Found [%d] partitions for stream [%s]", partitionIdsFromSupplier.size(), ioConfig.getStream());
    Map<PartitionIdType, SequenceOffsetType> storedMetadata = getOffsetsFromMetadataStorage();
    Set<PartitionIdType> storedPartitions = storedMetadata.keySet();
    Set<PartitionIdType> closedPartitions = storedMetadata.entrySet().stream().filter(x -> isEndOfShard(x.getValue())).map(Entry::getKey).collect(Collectors.toSet());
    Set<PartitionIdType> previouslyExpiredPartitions = storedMetadata.entrySet().stream().filter(x -> isShardExpirationMarker(x.getValue())).map(Entry::getKey).collect(Collectors.toSet());
    Set<PartitionIdType> partitionIdsFromSupplierWithoutPreviouslyExpiredPartitions = Sets.difference(partitionIdsFromSupplier, previouslyExpiredPartitions);
    Set<PartitionIdType> activePartitionsIdsFromSupplier = Sets.difference(partitionIdsFromSupplierWithoutPreviouslyExpiredPartitions, closedPartitions);
    Set<PartitionIdType> newlyClosedPartitions = Sets.intersection(closedPartitions, new HashSet<>(previousPartitionIds));
    log.debug("active partitions from supplier: " + activePartitionsIdsFromSupplier);
    if (partitionIdsFromSupplierWithoutPreviouslyExpiredPartitions.size() != partitionIdsFromSupplier.size()) {
        // this should never happen, but we check for it and exclude the expired partitions if they somehow reappear
        log.warn("Previously expired partitions [%s] were present in the current list [%s] from the record supplier.", previouslyExpiredPartitions, partitionIdsFromSupplier);
    }
    if (activePartitionsIdsFromSupplier.size() == 0) {
        String errMsg = StringUtils.format("No active partitions found for stream [%s] after removing closed and previously expired partitions", ioConfig.getStream());
        stateManager.recordThrowableEvent(new StreamException(new ISE(errMsg)));
        log.warn(errMsg);
        return false;
    }
    boolean initialPartitionDiscovery = this.partitionIds.isEmpty();
    for (PartitionIdType partitionId : partitionIdsFromSupplierWithoutPreviouslyExpiredPartitions) {
        if (closedPartitions.contains(partitionId)) {
            log.info("partition [%s] is closed and has no more data, skipping.", partitionId);
            continue;
        }
        if (!this.partitionIds.contains(partitionId)) {
            partitionIds.add(partitionId);
            if (!initialPartitionDiscovery) {
                subsequentlyDiscoveredPartitions.add(partitionId);
            }
        }
    }
    // If partition expiration is supported, clean up closed and expired partitions so they
    // no longer count toward the distribution of partitions across tasks.
    if (supportsPartitionExpiration()) {
        cleanupClosedAndExpiredPartitions(storedPartitions, newlyClosedPartitions, activePartitionsIdsFromSupplier, previouslyExpiredPartitions, partitionIdsFromSupplier);
    }
    Int2ObjectMap<List<PartitionIdType>> newlyDiscovered = new Int2ObjectLinkedOpenHashMap<>();
    for (PartitionIdType partitionId : activePartitionsIdsFromSupplier) {
        int taskGroupId = getTaskGroupIdForPartition(partitionId);
        Set<PartitionIdType> partitionGroup = partitionGroups.computeIfAbsent(taskGroupId, k -> new HashSet<>());
        partitionGroup.add(partitionId);
        if (partitionOffsets.putIfAbsent(partitionId, getNotSetMarker()) == null) {
            log.debug("New partition [%s] discovered for stream [%s], added to task group [%d]", partitionId, ioConfig.getStream(), taskGroupId);
            newlyDiscovered.computeIfAbsent(taskGroupId, k -> new ArrayList<>()).add(partitionId);
        }
    }
    if (newlyDiscovered.size() > 0) {
        for (Int2ObjectMap.Entry<List<PartitionIdType>> taskGroupPartitions : newlyDiscovered.int2ObjectEntrySet()) {
            log.info("New partitions %s discovered for stream [%s], added to task group [%s]", taskGroupPartitions.getValue(), ioConfig.getStream(), taskGroupPartitions.getIntKey());
        }
    }
    if (!partitionIds.equals(previousPartitionIds)) {
        assignRecordSupplierToPartitionIds();
        // The set of partition IDs has changed; have any running tasks stop early so that
        // we can adapt to the repartitioning quickly by creating new tasks.
        for (TaskGroup taskGroup : activelyReadingTaskGroups.values()) {
            if (!taskGroup.taskIds().isEmpty()) {
                // Partitions have changed and we are managing active tasks - set an early publish time
                // at the current time + repartitionTransitionDuration.
                // This allows time for the stream to start writing to the new partitions after repartitioning.
                // For Kinesis ingestion, this cooldown time is particularly useful, lowering the possibility of
                // the new shards being empty, which can cause issues presently
                // (see https://github.com/apache/druid/issues/7600)
                earlyStopTime = DateTimes.nowUtc().plus(tuningConfig.getRepartitionTransitionDuration());
                log.info("Previous partition set [%s] has changed to [%s] - requesting that tasks stop after [%s] at [%s]", previousPartitionIds, partitionIds, tuningConfig.getRepartitionTransitionDuration(), earlyStopTime);
                break;
            }
        }
    }
    return true;
}
Also used : SeekableStreamIndexTask(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask) Pair(org.apache.druid.java.util.common.Pair) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) TaskQueue(org.apache.druid.indexing.overlord.TaskQueue) Optional(com.google.common.base.Optional) TaskRunner(org.apache.druid.indexing.overlord.TaskRunner) Duration(java.time.Duration) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) Execs(org.apache.druid.java.util.common.concurrent.Execs) SeekableStreamDataSourceMetadata(org.apache.druid.indexing.seekablestream.SeekableStreamDataSourceMetadata) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) NotNull(javax.validation.constraints.NotNull) Int2ObjectLinkedOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectLinkedOpenHashMap) LagStats(org.apache.druid.indexing.overlord.supervisor.autoscaler.LagStats) TaskState(org.apache.druid.indexer.TaskState) Stream(java.util.stream.Stream) Predicate(com.google.common.base.Predicate) RowIngestionMetersFactory(org.apache.druid.segment.incremental.RowIngestionMetersFactory) TaskMaster(org.apache.druid.indexing.overlord.TaskMaster) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) Joiner(com.google.common.base.Joiner) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Iterables(com.google.common.collect.Iterables) SupervisorStateManager(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManager) Callable(java.util.concurrent.Callable) TaskStatus(org.apache.druid.indexer.TaskStatus) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) EntryExistsException(org.apache.druid.metadata.EntryExistsException) SeekableStreamIndexTaskIOConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskIOConfig) StringComparators(org.apache.druid.query.ordering.StringComparators) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) BiConsumer(java.util.function.BiConsumer) SupervisorManager(org.apache.druid.indexing.overlord.supervisor.SupervisorManager) AutoScalerConfig(org.apache.druid.indexing.seekablestream.supervisor.autoscaler.AutoScalerConfig) RetryUtils(org.apache.druid.java.util.common.RetryUtils) SeekableStreamIndexTaskClientFactory(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClientFactory) Nullable(javax.annotation.Nullable) SeekableStreamIndexTaskTuningConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskTuningConfig) SeekableStreamIndexTaskClient(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClient) BlockingDeque(java.util.concurrent.BlockingDeque) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) TaskLocation(org.apache.druid.indexer.TaskLocation) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Futures(com.google.common.util.concurrent.Futures) TaskInfoProvider(org.apache.druid.indexing.common.TaskInfoProvider) TreeMap(java.util.TreeMap) Int2ObjectMap(it.unimi.dsi.fastutil.ints.Int2ObjectMap) LinkedBlockingDeque(java.util.concurrent.LinkedBlockingDeque) SupervisorReport(org.apache.druid.indexing.overlord.supervisor.SupervisorReport) Preconditions(com.google.common.base.Preconditions) DataSchema(org.apache.druid.segment.indexing.DataSchema) SeekableStreamSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamSequenceNumbers) 
StreamPartition(org.apache.druid.indexing.seekablestream.common.StreamPartition) TimeoutException(java.util.concurrent.TimeoutException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Task(org.apache.druid.indexing.common.task.Task) SeekableStreamStartSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamStartSequenceNumbers) DateTimes(org.apache.druid.java.util.common.DateTimes) Function(com.google.common.base.Function) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) OrderedSequenceNumber(org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber) StreamException(org.apache.druid.indexing.seekablestream.common.StreamException) List(java.util.List) MetadataSupervisorManager(org.apache.druid.metadata.MetadataSupervisorManager) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) Entry(java.util.Map.Entry) ByteEntity(org.apache.druid.data.input.impl.ByteEntity) SortedMap(java.util.SortedMap) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) RecordSupplier(org.apache.druid.indexing.seekablestream.common.RecordSupplier) HashMap(java.util.HashMap) HashSet(java.util.HashSet) MapperFeature(com.fasterxml.jackson.databind.MapperFeature) ImmutableList(com.google.common.collect.ImmutableList) IndexTaskClient(org.apache.druid.indexing.common.IndexTaskClient) TaskRunnerListener(org.apache.druid.indexing.overlord.TaskRunnerListener) ExecutorService(java.util.concurrent.ExecutorService) ParseExceptionReport(org.apache.druid.segment.incremental.ParseExceptionReport) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) TimeUnit(java.util.concurrent.TimeUnit) TaskRunnerWorkItem(org.apache.druid.indexing.overlord.TaskRunnerWorkItem) VisibleForTesting(com.google.common.annotations.VisibleForTesting) DigestUtils(org.apache.commons.codec.digest.DigestUtils) Supervisor(org.apache.druid.indexing.overlord.supervisor.Supervisor) Comparator(java.util.Comparator) Collections(java.util.Collections) SeekableStreamIndexTaskRunner(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner)
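
The partition bookkeeping above is plain Guava set arithmetic: the active set is whatever the supplier reports, minus previously expired shards, minus closed ones. A minimal self-contained sketch of that arithmetic (the shard IDs are hypothetical, standing in for PartitionIdType values):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class PartitionSetArithmetic {
    public static void main(String[] args) {
        Set<String> fromSupplier = ImmutableSet.of("shard-0", "shard-1", "shard-2", "shard-3");
        Set<String> previouslyExpired = ImmutableSet.of("shard-3");
        Set<String> closed = ImmutableSet.of("shard-2");
        // Sets.difference returns an unmodifiable live view over its inputs,
        // which is why the supervisor only reads these sets and never mutates them.
        Set<String> notExpired = Sets.difference(fromSupplier, previouslyExpired);
        Set<String> active = Sets.difference(notExpired, closed);
        System.out.println(active); // [shard-0, shard-1]
    }
}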

Example 2 with StreamException

Use of org.apache.druid.indexing.seekablestream.common.StreamException in project druid by druid-io.

From the class SeekableStreamSupervisorStateManagerTest, method testStreamFailureLostContact:

@Test
public void testStreamFailureLostContact() {
    // clean run without errors
    stateManager.markRunFinished();
    Assert.assertEquals(BasicState.RUNNING, stateManager.getSupervisorState());
    for (int i = 0; i < config.getUnhealthinessThreshold(); i++) {
        Assert.assertEquals(BasicState.RUNNING, stateManager.getSupervisorState());
        stateManager.recordThrowableEvent(new StreamException(new IllegalStateException("DOH!")));
        stateManager.markRunFinished();
    }
    Assert.assertEquals(SeekableStreamState.LOST_CONTACT_WITH_STREAM, stateManager.getSupervisorState());
    Assert.assertEquals(BasicState.UNHEALTHY_SUPERVISOR, stateManager.getSupervisorState().getBasicState());
    Assert.assertEquals(config.getUnhealthinessThreshold(), stateManager.getExceptionEvents().size());
    stateManager.getExceptionEvents().forEach(x -> {
        Assert.assertTrue(((SeekableStreamExceptionEvent) x).isStreamException());
        Assert.assertEquals(IllegalStateException.class.getName(), x.getExceptionClass());
    });
}
Also used : StreamException(org.apache.druid.indexing.seekablestream.common.StreamException) Test(org.junit.Test)
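
The test pins down the threshold behavior: the supervisor stays RUNNING until it completes getUnhealthinessThreshold() consecutive runs that each recorded a StreamException, after which it reports the stream-specific LOST_CONTACT_WITH_STREAM state, whose basic state is UNHEALTHY_SUPERVISOR. A hedged sketch of that transition; the names consecutiveFailedRuns and recentEventsAreStreamRelated are assumptions, not the actual SupervisorStateManager internals:

// Sketch only: assumed bookkeeping fields, not the real state manager internals.
private void maybeMarkUnhealthy() {
    if (consecutiveFailedRuns >= config.getUnhealthinessThreshold()) {
        // Stream-related failures get the more specific state so operators can
        // distinguish "cannot reach the stream" from other supervisor problems.
        supervisorState = recentEventsAreStreamRelated
                ? SeekableStreamState.LOST_CONTACT_WITH_STREAM
                : BasicState.UNHEALTHY_SUPERVISOR;
    }
}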

Example 3 with StreamException

Use of org.apache.druid.indexing.seekablestream.common.StreamException in project druid by druid-io.

From the class SeekableStreamSupervisorStateManagerTest, method testGetThrowableEvents:

@Test
public void testGetThrowableEvents() {
    List<Exception> exceptions = ImmutableList.of(new StreamException(new UnsupportedOperationException("oof")), new NullPointerException("oof"), new RuntimeException(new StreamException(new Exception("oof"))), new RuntimeException(new IllegalArgumentException("oof")));
    for (Exception exception : exceptions) {
        stateManager.recordThrowableEvent(exception);
        stateManager.markRunFinished();
    }
    Assert.assertEquals(BasicState.UNHEALTHY_SUPERVISOR, stateManager.getSupervisorState());
    List<Pair<String, Boolean>> expected = ImmutableList.of(Pair.of("java.lang.UnsupportedOperationException", true), Pair.of("java.lang.NullPointerException", false), Pair.of("java.lang.Exception", true), Pair.of("java.lang.IllegalArgumentException", false));
    Iterator<SupervisorStateManager.ExceptionEvent> it = stateManager.getExceptionEvents().iterator();
    expected.forEach(x -> {
        SupervisorStateManager.ExceptionEvent event = it.next();
        Assert.assertNotNull(event.getMessage());
        Assert.assertEquals(x.lhs, event.getExceptionClass());
        Assert.assertEquals(x.rhs, ((SeekableStreamExceptionEvent) event).isStreamException());
    });
    Assert.assertFalse(it.hasNext());
}
Also used : SeekableStreamExceptionEvent(org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisorStateManager.SeekableStreamExceptionEvent) IOException(java.io.IOException) StreamException(org.apache.druid.indexing.seekablestream.common.StreamException) SupervisorStateManager(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManager) Pair(org.apache.druid.java.util.common.Pair) Test(org.junit.Test)
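
The expected pairs encode two classification rules: the reported exception class is the root cause's class, and an event counts as a stream exception when a StreamException appears anywhere in the cause chain, which is why RuntimeException(StreamException(Exception("oof"))) maps to ("java.lang.Exception", true). A plausible sketch of those checks using Guava's Throwables (an assumption, not necessarily the exact code in SeekableStreamSupervisorStateManager):

import com.google.common.base.Throwables;

// True if t itself, or any cause beneath it, is a StreamException.
static boolean isStreamException(Throwable t) {
    return Throwables.getCausalChain(t).stream()
            .anyMatch(cause -> cause instanceof StreamException);
}

// The class the event reports is the innermost (root) cause's class.
static String exceptionClassOf(Throwable t) {
    return Throwables.getRootCause(t).getClass().getName();
}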

Example 4 with StreamException

Use of org.apache.druid.indexing.seekablestream.common.StreamException in project druid by druid-io.

From the class KafkaSupervisor, method updatePartitionLagFromStream:

@Override
protected void updatePartitionLagFromStream() {
    getRecordSupplierLock().lock();
    try {
        Set<Integer> partitionIds;
        try {
            partitionIds = recordSupplier.getPartitionIds(getIoConfig().getStream());
        } catch (Exception e) {
            log.warn("Could not fetch partitions for topic/stream [%s]", getIoConfig().getStream());
            throw new StreamException(e);
        }
        Set<StreamPartition<Integer>> partitions = partitionIds.stream().map(e -> new StreamPartition<>(getIoConfig().getStream(), e)).collect(Collectors.toSet());
        recordSupplier.seekToLatest(partitions);
        // this method isn't actually computing the lag, just fetching the latest offsets from the stream. This is
        // because we currently only have record lag for kafka, which can be lazily computed by subtracting the highest
        // task offsets from the latest offsets from the stream when it is needed
        latestSequenceFromStream = partitions.stream().collect(Collectors.toMap(StreamPartition::getPartitionId, recordSupplier::getPosition));
    } catch (InterruptedException e) {
        throw new StreamException(e);
    } finally {
        getRecordSupplierLock().unlock();
    }
}
Also used : StreamPartition(org.apache.druid.indexing.seekablestream.common.StreamPartition) KafkaDataSourceMetadata(org.apache.druid.indexing.kafka.KafkaDataSourceMetadata) RecordSupplier(org.apache.druid.indexing.seekablestream.common.RecordSupplier) DruidMonitorSchedulerConfig(org.apache.druid.server.metrics.DruidMonitorSchedulerConfig) SeekableStreamIndexTask(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask) TaskResource(org.apache.druid.indexing.common.task.TaskResource) SeekableStreamSupervisor(org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisor) ArrayList(java.util.ArrayList) IdUtils(org.apache.druid.common.utils.IdUtils) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) Task(org.apache.druid.indexing.common.task.Task) SeekableStreamIndexTaskIOConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskIOConfig) Map(java.util.Map) TypeReference(com.fasterxml.jackson.core.type.TypeReference) SeekableStreamSupervisorReportPayload(org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisorReportPayload) Nullable(javax.annotation.Nullable) KafkaIndexTaskIOConfig(org.apache.druid.indexing.kafka.KafkaIndexTaskIOConfig) SeekableStreamIndexTaskTuningConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskTuningConfig) SeekableStreamStartSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamStartSequenceNumbers) SeekableStreamEndSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamEndSequenceNumbers) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Collectors(java.util.stream.Collectors) LagStats(org.apache.druid.indexing.overlord.supervisor.autoscaler.LagStats) OrderedSequenceNumber(org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber) StreamException(org.apache.druid.indexing.seekablestream.common.StreamException) KafkaRecordEntity(org.apache.druid.data.input.kafka.KafkaRecordEntity) List(java.util.List) SeekableStreamSupervisorIOConfig(org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisorIOConfig) KafkaSequenceNumber(org.apache.druid.indexing.kafka.KafkaSequenceNumber) TreeMap(java.util.TreeMap) KafkaIndexTask(org.apache.druid.indexing.kafka.KafkaIndexTask) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) RowIngestionMetersFactory(org.apache.druid.segment.incremental.RowIngestionMetersFactory) Entry(java.util.Map.Entry) KafkaIndexTaskClientFactory(org.apache.druid.indexing.kafka.KafkaIndexTaskClientFactory) KafkaRecordSupplier(org.apache.druid.indexing.kafka.KafkaRecordSupplier) TaskMaster(org.apache.druid.indexing.overlord.TaskMaster) VisibleForTesting(com.google.common.annotations.VisibleForTesting) KafkaIndexTaskTuningConfig(org.apache.druid.indexing.kafka.KafkaIndexTaskTuningConfig) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) Collections(java.util.Collections)
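
As the inline comment explains, updatePartitionLagFromStream only snapshots the latest offsets; record lag is derived later by subtracting each task's highest consumed offset. A hedged sketch of that subtraction, where currentOffsets is a hypothetical map of highest task offsets per partition rather than an actual KafkaSupervisor field:

// Sketch: per-partition record lag = latest stream offset - highest task offset.
// latestSequenceFromStream is the map populated above; currentOffsets is assumed.
Map<Integer, Long> partitionLag = latestSequenceFromStream.entrySet().stream()
        .collect(Collectors.toMap(
                Entry::getKey,
                e -> e.getValue() - currentOffsets.getOrDefault(e.getKey(), 0L)));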

Example 5 with StreamException

Use of org.apache.druid.indexing.seekablestream.common.StreamException in project druid by druid-io.

From the class KafkaRecordSupplier, method getKafkaDeserializer:

private static Deserializer getKafkaDeserializer(Properties properties, String kafkaConfigKey) {
    Deserializer deserializerObject;
    try {
        Class deserializerClass = Class.forName(properties.getProperty(kafkaConfigKey, ByteArrayDeserializer.class.getTypeName()));
        Method deserializerMethod = deserializerClass.getMethod("deserialize", String.class, byte[].class);
        Type deserializerReturnType = deserializerMethod.getGenericReturnType();
        if (deserializerReturnType == byte[].class) {
            deserializerObject = (Deserializer) deserializerClass.getConstructor().newInstance();
        } else {
            throw new IllegalArgumentException("Kafka deserializers must return a byte array (byte[]), " + deserializerClass.getName() + " returns " + deserializerReturnType.getTypeName());
        }
    } catch (ClassNotFoundException | NoSuchMethodException | InstantiationException | IllegalAccessException | InvocationTargetException e) {
        throw new StreamException(e);
    }
    return deserializerObject;
}
Also used : Type(java.lang.reflect.Type) Deserializer(org.apache.kafka.common.serialization.Deserializer) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Method(java.lang.reflect.Method) InvocationTargetException(java.lang.reflect.InvocationTargetException) StreamException(org.apache.druid.indexing.seekablestream.common.StreamException)
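
A usage sketch of the lookup above. When the property is absent, the code falls back to ByteArrayDeserializer; a custom class must expose deserialize(String, byte[]) returning byte[], and reflection failures such as a missing class surface as StreamException. The call site below is hypothetical, since getKafkaDeserializer is private in KafkaRecordSupplier, and the missing class name is illustrative only:

Properties props = new Properties();
// Absent property: falls back to org.apache.kafka.common.serialization.ByteArrayDeserializer.
Deserializer valueDeserializer = getKafkaDeserializer(props, "value.deserializer");

props.setProperty("value.deserializer", "com.example.MissingDeserializer");
// This call would throw StreamException wrapping a ClassNotFoundException:
// getKafkaDeserializer(props, "value.deserializer");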

Aggregations

StreamException (org.apache.druid.indexing.seekablestream.common.StreamException) 7
Test (org.junit.Test) 3
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException) 2
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 2
IOException (java.io.IOException) 2
ArrayList (java.util.ArrayList) 2
Collections (java.util.Collections) 2
List (java.util.List) 2
Map (java.util.Map) 2
Entry (java.util.Map.Entry) 2
Set (java.util.Set) 2
TreeMap (java.util.TreeMap) 2
Collectors (java.util.stream.Collectors) 2
Nullable (javax.annotation.Nullable) 2
Task (org.apache.druid.indexing.common.task.Task) 2
DataSourceMetadata (org.apache.druid.indexing.overlord.DataSourceMetadata) 2
IndexerMetadataStorageCoordinator (org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) 2
TaskMaster (org.apache.druid.indexing.overlord.TaskMaster) 2
TaskStorage (org.apache.druid.indexing.overlord.TaskStorage) 2