Example 96 with IAE

Use of org.apache.druid.java.util.common.IAE in project druid by druid-io.

The class UnifiedIndexerAppenderatorsManager, method getBundle:

@VisibleForTesting
<T> DatasourceBundle getBundle(final Query<T> query) {
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    final TableDataSource table = analysis.getBaseTableDataSource().orElseThrow(() -> new ISE("Cannot handle datasource: %s", analysis.getDataSource()));
    final DatasourceBundle bundle;
    synchronized (this) {
        bundle = datasourceBundles.get(table.getName());
    }
    if (bundle == null) {
        throw new IAE("Could not find segment walker for datasource [%s]", table.getName());
    }
    return bundle;
}
Also used : TableDataSource(org.apache.druid.query.TableDataSource) ISE(org.apache.druid.java.util.common.ISE) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) IAE(org.apache.druid.java.util.common.IAE) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
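
For context, IAE extends IllegalArgumentException and builds its message by formatting the trailing arguments into the format string. A minimal standalone sketch of the same lookup-or-throw pattern, using a plain Map in place of datasourceBundles; the class and field names here are illustrative, not from Druid:

import java.util.HashMap;
import java.util.Map;
import org.apache.druid.java.util.common.IAE;

public class BundleLookupSketch {
    // stands in for the datasourceBundles map read under the manager's lock
    private static final Map<String, String> BUNDLES = new HashMap<>();

    static String getBundle(String datasource) {
        final String bundle = BUNDLES.get(datasource);
        if (bundle == null) {
            throw new IAE("Could not find segment walker for datasource [%s]", datasource);
        }
        return bundle;
    }

    public static void main(String[] args) {
        try {
            getBundle("wikipedia");
        } catch (IllegalArgumentException e) {
            // prints: Could not find segment walker for datasource [wikipedia]
            System.out.println(e.getMessage());
        }
    }
}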

Example 97 with IAE

Use of org.apache.druid.java.util.common.IAE in project druid by druid-io.

The class MetadataStoreCredentialsValidator, method validateCredentials:

@Override
@Nullable
public AuthenticationResult validateCredentials(String authenticatorName, String authorizerName, String username, char[] password) {
    Map<String, BasicAuthenticatorUser> userMap = cacheManager.get().getUserMap(authenticatorName);
    if (userMap == null) {
        throw new IAE("No userMap is available for authenticator with prefix: [%s]", authenticatorName);
    }
    BasicAuthenticatorUser user = userMap.get(username);
    if (user == null) {
        return null;
    }
    BasicAuthenticatorCredentials credentials = user.getCredentials();
    if (credentials == null) {
        return null;
    }
    byte[] recalculatedHash = BasicAuthUtils.hashPassword(password, credentials.getSalt(), credentials.getIterations());
    if (Arrays.equals(recalculatedHash, credentials.getHash())) {
        return new AuthenticationResult(username, authorizerName, authenticatorName, null);
    } else {
        LOG.debug("Password incorrect for metadata store user %s", username);
        throw new BasicSecurityAuthenticationException("User metadata store authentication failed.");
    }
}
Also used : BasicSecurityAuthenticationException(org.apache.druid.security.basic.BasicSecurityAuthenticationException) BasicAuthenticatorCredentials(org.apache.druid.security.basic.authentication.entity.BasicAuthenticatorCredentials) BasicAuthenticatorUser(org.apache.druid.security.basic.authentication.entity.BasicAuthenticatorUser) IAE(org.apache.druid.java.util.common.IAE) AuthenticationResult(org.apache.druid.server.security.AuthenticationResult) Nullable(javax.annotation.Nullable)
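
The validator distinguishes three outcomes: an IAE for a configuration problem (no user map cached for this authenticator), a null return for an unknown user or a user without stored credentials, and a BasicSecurityAuthenticationException for a failed password check. A hedged caller sketch; the validator variable and the literal arguments are illustrative:

try {
    final AuthenticationResult result =
        validator.validateCredentials("basic", "basic", "alice", "secret".toCharArray());
    if (result == null) {
        // unknown user, or a user record without credentials
    }
} catch (BasicSecurityAuthenticationException e) {
    // the user exists but the recalculated password hash did not match
} catch (IAE e) {
    // configuration problem: the cache manager has no user map for this authenticator
}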

Example 98 with IAE

Use of org.apache.druid.java.util.common.IAE in project druid by druid-io.

The class SeekableStreamSupervisor, method validateMetadataPartitionExpiration:

/**
 * Perform a sanity check on the datasource metadata returned by
 * {@link #createDataSourceMetadataWithExpiredPartitions}.
 * <p>
 * Specifically, we check that the cleaned metadata's partitions are a subset of the original metadata's partitions,
 * that newly expired partitions are marked as expired, and that none of the offsets for the non-expired partitions
 * have changed.
 *
 * @param newlyExpiredPartitions partitions that are newly expired and should be marked as such
 * @param oldMetadata     metadata containing expired partitions.
 * @param cleanedMetadata new metadata without expired partitions, generated by the subclass
 */
private void validateMetadataPartitionExpiration(Set<PartitionIdType> newlyExpiredPartitions, SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> oldMetadata, SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> cleanedMetadata) {
    Map<PartitionIdType, SequenceOffsetType> oldPartitionSeqNos = oldMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap();
    Map<PartitionIdType, SequenceOffsetType> cleanedPartitionSeqNos = cleanedMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap();
    for (Entry<PartitionIdType, SequenceOffsetType> cleanedPartitionSeqNo : cleanedPartitionSeqNos.entrySet()) {
        if (!oldPartitionSeqNos.containsKey(cleanedPartitionSeqNo.getKey())) {
            // cleaning the expired partitions added a partition somehow
            throw new IAE("Cleaned partition map [%s] contains unexpected partition ID [%s], original partition map: [%s]", cleanedPartitionSeqNos, cleanedPartitionSeqNo.getKey(), oldPartitionSeqNos);
        }
        SequenceOffsetType oldOffset = oldPartitionSeqNos.get(cleanedPartitionSeqNo.getKey());
        if (newlyExpiredPartitions.contains(cleanedPartitionSeqNo.getKey())) {
            // this is a newly expired partition, check that we did actually mark it as expired
            if (!isShardExpirationMarker(cleanedPartitionSeqNo.getValue())) {
                throw new IAE("Newly expired partition [%] was not marked as expired in the cleaned partition map [%s], original partition map: [%s]", cleanedPartitionSeqNo.getKey(), cleanedPartitionSeqNos, oldPartitionSeqNos);
            }
        } else if (!oldOffset.equals(cleanedPartitionSeqNo.getValue())) {
            // this is not an expired shard, check that the offset did not change
            throw new IAE("Cleaned partition map [%s] has offset mismatch for partition ID [%s], original partition map: [%s]", cleanedPartitionSeqNos, cleanedPartitionSeqNo.getKey(), oldPartitionSeqNos);
        }
    }
}
Also used : IAE(org.apache.druid.java.util.common.IAE)
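
The three invariants are easier to see with the generics stripped away. A standalone sketch using String partition ids and Long offsets; all names are hypothetical, and -1 stands in for the expiration marker checked by isShardExpirationMarker:

import java.util.Map;
import java.util.Set;
import org.apache.druid.java.util.common.IAE;

static void validateCleaned(Map<String, Long> old, Map<String, Long> cleaned, Set<String> newlyExpired) {
    for (Map.Entry<String, Long> e : cleaned.entrySet()) {
        if (!old.containsKey(e.getKey())) {
            // invariant 1: cleaning must not introduce new partitions
            throw new IAE("Cleaned map [%s] contains unexpected partition [%s]", cleaned, e.getKey());
        }
        if (newlyExpired.contains(e.getKey())) {
            // invariant 2: newly expired partitions must carry the expiration marker
            if (e.getValue() != -1L) {
                throw new IAE("Newly expired partition [%s] was not marked as expired", e.getKey());
            }
        } else if (!old.get(e.getKey()).equals(e.getValue())) {
            // invariant 3: offsets of surviving partitions must be unchanged
            throw new IAE("Offset changed for non-expired partition [%s]", e.getKey());
        }
    }
}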

Example 99 with IAE

Use of org.apache.druid.java.util.common.IAE in project druid by druid-io.

The class SeekableStreamSupervisor, method verifyAndMergeCheckpoints:

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills the
 * inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup for which segments have already been published, so that any newly
 * created tasks for the taskGroup start indexing from after the latest published sequences.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();
    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>> checkpointsFuture = taskClient.getCheckpointsAsync(taskId, true);
        futures.add(checkpointsFuture);
        taskIds.add(taskId);
    }
    try {
        List<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>> futuresResult = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    stateManager.recordThrowableEvent(e);
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId, "Exception[%s] while getting checkpoints", e.getClass());
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    final DataSourceMetadata rawDataSourceMetadata = indexerMetadataStorageCoordinator.retrieveDataSourceMetadata(dataSource);
    if (rawDataSourceMetadata != null && !checkSourceMetadataMatch(rawDataSourceMetadata)) {
        throw new IAE("Datasource metadata instance does not match required, found instance of [%s]", rawDataSourceMetadata.getClass());
    }
    @SuppressWarnings("unchecked") final SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> latestDataSourceMetadata = (SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType>) rawDataSourceMetadata;
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null && latestDataSourceMetadata.getSeekableStreamSequenceNumbers() != null && ioConfig.getStream().equals(latestDataSourceMetadata.getSeekableStreamSequenceNumbers().getStream());
    final Map<PartitionIdType, SequenceOffsetType> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap();
    } else {
        latestOffsetsFromDb = null;
    }
    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));
    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;
    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find and store the earliest sequence id that is consistent with the latest offsets in the metadata store
            if (taskCheckpoints.entrySet().stream().anyMatch(sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream().allMatch(partitionOffset -> {
                OrderedSequenceNumber<SequenceOffsetType> sequence = makeSequenceNumber(partitionOffset.getValue());
                OrderedSequenceNumber<SequenceOffsetType> latestOffset = makeSequenceNumber(latestOffsetsFromDb == null ? partitionOffset.getValue() : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(), partitionOffset.getValue()));
                return sequence.compareTo(latestOffset) == 0;
            }) && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey())) || (pendingCompletionTaskGroups.getOrDefault(groupId, new CopyOnWriteArrayList<>()).size() > 0 && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<PartitionIdType, SequenceOffsetType>> latestCheckpoints = new TreeMap<>(taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.checkpointSequences.clear();
                taskGroup.checkpointSequences.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId, taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.checkpointSequences.firstKey()) == null || !(taskCheckpoints.get(taskGroup.checkpointSequences.firstKey()).equals(taskGroup.checkpointSequences.firstEntry().getValue())) || taskCheckpoints.tailMap(taskGroup.checkpointSequences.firstKey()).size() != taskGroup.checkpointSequences.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId, taskCheckpoints, taskGroup.checkpointSequences);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }
    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) || (taskGroup.tasks.size() == 0 && pendingCompletionTaskGroups.getOrDefault(groupId, new CopyOnWriteArrayList<>()).size() == 0)) {
        // either all tasks are being killed, or no tasks are left in the group
        // clear the task group's state so that the latest sequence information is fetched from the metadata store
        log.warn("Clearing task group [%d] information as no valid tasks are left in the group", groupId);
        activelyReadingTaskGroups.remove(groupId);
        for (PartitionIdType partitionId : taskGroup.startingSequences.keySet()) {
            partitionOffsets.put(partitionId, getNotSetMarker());
        }
    }
    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs)).forEach(sequenceCheckpoint -> {
        killTask(sequenceCheckpoint.lhs, "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints[%s] or latest " + "persisted sequences in metadata store [%s]", sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.checkpointSequences, latestOffsetsFromDb);
        taskGroup.tasks.remove(sequenceCheckpoint.lhs);
    });
}
Also used : SeekableStreamIndexTask(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask) Pair(org.apache.druid.java.util.common.Pair) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) TaskQueue(org.apache.druid.indexing.overlord.TaskQueue) Optional(com.google.common.base.Optional) TaskRunner(org.apache.druid.indexing.overlord.TaskRunner) Duration(java.time.Duration) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) Execs(org.apache.druid.java.util.common.concurrent.Execs) SeekableStreamDataSourceMetadata(org.apache.druid.indexing.seekablestream.SeekableStreamDataSourceMetadata) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) NotNull(javax.validation.constraints.NotNull) Int2ObjectLinkedOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectLinkedOpenHashMap) LagStats(org.apache.druid.indexing.overlord.supervisor.autoscaler.LagStats) TaskState(org.apache.druid.indexer.TaskState) Stream(java.util.stream.Stream) Predicate(com.google.common.base.Predicate) RowIngestionMetersFactory(org.apache.druid.segment.incremental.RowIngestionMetersFactory) TaskMaster(org.apache.druid.indexing.overlord.TaskMaster) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) Joiner(com.google.common.base.Joiner) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) Iterables(com.google.common.collect.Iterables) SupervisorStateManager(org.apache.druid.indexing.overlord.supervisor.SupervisorStateManager) Callable(java.util.concurrent.Callable) TaskStatus(org.apache.druid.indexer.TaskStatus) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) EntryExistsException(org.apache.druid.metadata.EntryExistsException) SeekableStreamIndexTaskIOConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskIOConfig) StringComparators(org.apache.druid.query.ordering.StringComparators) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) BiConsumer(java.util.function.BiConsumer) SupervisorManager(org.apache.druid.indexing.overlord.supervisor.SupervisorManager) AutoScalerConfig(org.apache.druid.indexing.seekablestream.supervisor.autoscaler.AutoScalerConfig) RetryUtils(org.apache.druid.java.util.common.RetryUtils) SeekableStreamIndexTaskClientFactory(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClientFactory) Nullable(javax.annotation.Nullable) SeekableStreamIndexTaskTuningConfig(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskTuningConfig) SeekableStreamIndexTaskClient(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClient) BlockingDeque(java.util.concurrent.BlockingDeque) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) TaskLocation(org.apache.druid.indexer.TaskLocation) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Futures(com.google.common.util.concurrent.Futures) TaskInfoProvider(org.apache.druid.indexing.common.TaskInfoProvider) TreeMap(java.util.TreeMap) Int2ObjectMap(it.unimi.dsi.fastutil.ints.Int2ObjectMap) LinkedBlockingDeque(java.util.concurrent.LinkedBlockingDeque) SupervisorReport(org.apache.druid.indexing.overlord.supervisor.SupervisorReport) Preconditions(com.google.common.base.Preconditions) DataSchema(org.apache.druid.segment.indexing.DataSchema) SeekableStreamSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamSequenceNumbers) 
StreamPartition(org.apache.druid.indexing.seekablestream.common.StreamPartition) TimeoutException(java.util.concurrent.TimeoutException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Task(org.apache.druid.indexing.common.task.Task) SeekableStreamStartSequenceNumbers(org.apache.druid.indexing.seekablestream.SeekableStreamStartSequenceNumbers) DateTimes(org.apache.druid.java.util.common.DateTimes) Function(com.google.common.base.Function) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) OrderedSequenceNumber(org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber) StreamException(org.apache.druid.indexing.seekablestream.common.StreamException) List(java.util.List) MetadataSupervisorManager(org.apache.druid.metadata.MetadataSupervisorManager) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) Entry(java.util.Map.Entry) ByteEntity(org.apache.druid.data.input.impl.ByteEntity) SortedMap(java.util.SortedMap) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) RecordSupplier(org.apache.druid.indexing.seekablestream.common.RecordSupplier) HashMap(java.util.HashMap) HashSet(java.util.HashSet) MapperFeature(com.fasterxml.jackson.databind.MapperFeature) ImmutableList(com.google.common.collect.ImmutableList) IndexTaskClient(org.apache.druid.indexing.common.IndexTaskClient) TaskRunnerListener(org.apache.druid.indexing.overlord.TaskRunnerListener) ExecutorService(java.util.concurrent.ExecutorService) ParseExceptionReport(org.apache.druid.segment.incremental.ParseExceptionReport) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) TimeUnit(java.util.concurrent.TimeUnit) TaskRunnerWorkItem(org.apache.druid.indexing.overlord.TaskRunnerWorkItem) VisibleForTesting(com.google.common.annotations.VisibleForTesting) DigestUtils(org.apache.commons.codec.digest.DigestUtils) Supervisor(org.apache.druid.indexing.overlord.supervisor.Supervisor) Comparator(java.util.Comparator) Collections(java.util.Collections) SeekableStreamIndexTaskRunner(org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner)
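
The consistency test buried in the long condition near the end of the loop is easier to read in isolation: a task agrees with the group when it holds the group's first checkpoint with an identical offset map and has exactly the group's checkpoints from that sequence id onward. A minimal sketch with Integer sequence ids and String offset summaries; the method and parameter names are hypothetical:

import java.util.TreeMap;

static boolean isConsistentWithGroup(TreeMap<Integer, String> taskCheckpoints, TreeMap<Integer, String> groupCheckpoints) {
    final Integer firstKey = groupCheckpoints.firstKey();
    return taskCheckpoints.get(firstKey) != null
           // the offsets at the group's first checkpoint must match exactly
           && taskCheckpoints.get(firstKey).equals(groupCheckpoints.firstEntry().getValue())
           // and the task must have neither extra nor missing checkpoints after it
           && taskCheckpoints.tailMap(firstKey).size() == groupCheckpoints.size();
}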

Example 100 with IAE

Use of org.apache.druid.java.util.common.IAE in project druid by druid-io.

The class TrimExprMacro, method apply:

@Override
public Expr apply(final List<Expr> args) {
    if (args.size() < 1 || args.size() > 2) {
        throw new IAE("Function[%s] must have 1 or 2 arguments", name());
    }
    final Function<Expr.Shuttle, Expr> visitFn = shuttle -> shuttle.visit(apply(shuttle.visitAll(args)));
    if (args.size() == 1) {
        return new TrimStaticCharsExpr(mode, args.get(0), DEFAULT_CHARS, null, visitFn);
    } else {
        final Expr charsArg = args.get(1);
        if (charsArg.isLiteral()) {
            final String charsString = charsArg.eval(InputBindings.nilBindings()).asString();
            final char[] chars = charsString == null ? EMPTY_CHARS : charsString.toCharArray();
            return new TrimStaticCharsExpr(mode, args.get(0), chars, charsArg, visitFn);
        } else {
            return new TrimDynamicCharsExpr(mode, args.get(0), args.get(1), visitFn);
        }
    }
}
Also used : InputBindings(org.apache.druid.math.expr.InputBindings) Arrays(java.util.Arrays) ImmutableSet(com.google.common.collect.ImmutableSet) StringUtils(org.apache.druid.java.util.common.StringUtils) Function(java.util.function.Function) ExprEval(org.apache.druid.math.expr.ExprEval) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) Objects(java.util.Objects) List(java.util.List) ExpressionType(org.apache.druid.math.expr.ExpressionType) Expr(org.apache.druid.math.expr.Expr) VisibleForTesting(com.google.common.annotations.VisibleForTesting) IAE(org.apache.druid.java.util.common.IAE) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Expr(org.apache.druid.math.expr.Expr) IAE(org.apache.druid.java.util.common.IAE)
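
The arity guard at the top is the simplest IAE pattern in these examples: validate the argument list up front and fail fast with a formatted message. A standalone sketch of that guard; the helper and parameter names are hypothetical:

import java.util.List;
import org.apache.druid.java.util.common.IAE;

// Rejects trim() and trim(a, b, c); accepts trim(a) and trim(a, b).
static void checkArity(List<?> args, String functionName) {
    if (args.size() < 1 || args.size() > 2) {
        throw new IAE("Function[%s] must have 1 or 2 arguments", functionName);
    }
}

After the guard, apply picks one of three shapes: a single argument trims the default chars, a literal second argument is folded into a static char array at planning time, and a non-literal second argument defers to TrimDynamicCharsExpr, which resolves the trim chars per row.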

Aggregations

IAE (org.apache.druid.java.util.common.IAE): 115
ISE (org.apache.druid.java.util.common.ISE): 23
IOException (java.io.IOException): 20
ByteBuffer (java.nio.ByteBuffer): 19
ArrayList (java.util.ArrayList): 16
List (java.util.List): 14
Expr (org.apache.druid.math.expr.Expr): 14
Nullable (javax.annotation.Nullable): 12
ColumnType (org.apache.druid.segment.column.ColumnType): 10
HashSet (java.util.HashSet): 8
Map (java.util.Map): 8
Interval (org.joda.time.Interval): 8
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 7
HashMap (java.util.HashMap): 7
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 7
File (java.io.File): 6
Iterables (com.google.common.collect.Iterables): 5
Arrays (java.util.Arrays): 5
Test (org.junit.Test): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 4