Example 6 with Accumulator

use of org.apache.druid.java.util.common.guava.Accumulator in project druid by apache.

the class SegmentAnalyzer method analyzeStringColumn.

private ColumnAnalysis analyzeStringColumn(final ColumnCapabilities capabilities, final StorageAdapter storageAdapter, final String columnName) {
    int cardinality = 0;
    long size = 0;
    Comparable min = null;
    Comparable max = null;
    if (analyzingCardinality()) {
        cardinality = storageAdapter.getDimensionCardinality(columnName);
    }
    if (analyzingSize()) {
        final DateTime start = storageAdapter.getMinTime();
        final DateTime end = storageAdapter.getMaxTime();
        final Sequence<Cursor> cursors = storageAdapter.makeCursors(null, new Interval(start, end), VirtualColumns.EMPTY, Granularities.ALL, false, null);
        size = cursors.accumulate(0L, new Accumulator<Long, Cursor>() {

            @Override
            public Long accumulate(Long accumulated, Cursor cursor) {
                DimensionSelector selector = cursor.getColumnSelectorFactory().makeDimensionSelector(new DefaultDimensionSpec(columnName, columnName));
                if (selector == null) {
                    return accumulated;
                }
                long current = accumulated;
                while (!cursor.isDone()) {
                    final IndexedInts row = selector.getRow();
                    for (int i = 0, rowSize = row.size(); i < rowSize; ++i) {
                        final String dimVal = selector.lookupName(row.get(i));
                        if (dimVal != null && !dimVal.isEmpty()) {
                            current += StringUtils.estimatedBinaryLengthAsUTF8(dimVal);
                        }
                    }
                    cursor.advance();
                }
                return current;
            }
        });
    }
    if (analyzingMinMax()) {
        min = storageAdapter.getMinValue(columnName);
        max = storageAdapter.getMaxValue(columnName);
    }
    return new ColumnAnalysis(
        capabilities.toColumnType(),
        capabilities.getType().name(),
        capabilities.hasMultipleValues().isTrue(),
        // if we don't know for sure, then we should plan to check for nulls
        capabilities.hasNulls().isMaybeTrue(),
        size,
        cardinality,
        min,
        max,
        null
    );
}
}
Also used : Accumulator(org.apache.druid.java.util.common.guava.Accumulator) DimensionSelector(org.apache.druid.segment.DimensionSelector) Cursor(org.apache.druid.segment.Cursor) DateTime(org.joda.time.DateTime) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) IndexedInts(org.apache.druid.segment.data.IndexedInts) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) Interval(org.joda.time.Interval)
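
The cursor walk above is, structurally, just a left fold: Sequence.accumulate threads a running total through the Accumulator callback until the sequence is exhausted. Below is a minimal, self-contained sketch of the same pattern, with a hard-coded list of dimension values standing in for a real StorageAdapter and its cursors; the class name and sample values are invented for illustration.

import java.util.Arrays;

import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.guava.Accumulator;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class SizeAccumulatorSketch {
    public static void main(String[] args) {
        // Hard-coded stand-in for the dimension values the cursors would yield.
        final Sequence<String> dimValues = Sequences.simple(Arrays.asList("foo", "bar", "bazbaz"));

        // Fold the sequence into a single running size, as analyzeStringColumn does.
        final long totalSize = dimValues.accumulate(0L, new Accumulator<Long, String>() {
            @Override
            public Long accumulate(Long accumulated, String dimVal) {
                return accumulated + StringUtils.estimatedBinaryLengthAsUTF8(dimVal);
            }
        });

        System.out.println("estimated UTF-8 size in bytes: " + totalSize);
    }
}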

Example 7 with Accumulator

use of org.apache.druid.java.util.common.guava.Accumulator in project druid by apache.

the class RowBasedGrouperHelper method createGrouperAccumulatorPair.

/**
 * Create a {@link Grouper} that groups according to the dimensions and aggregators in "query", along with
 * an {@link Accumulator} that accepts ResultRows and forwards them to the grouper.
 *
 * The pair will operate in one of two modes:
 *
 * 1) Combining mode (used if "subquery" is null). In this mode, filters from the "query" are ignored, and
 * its aggregators are converted into combining form. The input ResultRows are assumed to be partially-grouped
 * results originating from the provided "query".
 *
 * 2) Subquery mode (used if "subquery" is nonnull). In this mode, filters from the "query" (both intervals
 * and dim filters) are respected, and its aggregators are used in standard (not combining) form. The input
 * ResultRows are assumed to be results originating from the provided "subquery".
 *
 * @param query               query that we are grouping for
 * @param subquery            optional subquery that we are receiving results from (see combining vs. subquery
 *                            mode above)
 * @param config              groupBy query config
 * @param bufferSupplier      supplier of merge buffers
 * @param combineBufferHolder holder of combine buffers. Unused if concurrencyHint = -1, and may be null in that case
 * @param concurrencyHint     -1 for single-threaded Grouper, >=1 for concurrent Grouper
 * @param temporaryStorage    temporary storage used for spilling from the Grouper
 * @param spillMapper         object mapper used for spilling from the Grouper
 * @param grouperSorter       executor service used for parallel combining. Unused if concurrencyHint = -1, and may
 *                            be null in that case
 * @param priority            query priority
 * @param hasQueryTimeout     whether or not this query has a timeout
 * @param queryTimeoutAt      when this query times out, in milliseconds since the epoch
 * @param mergeBufferSize     size of the merge buffers from "bufferSupplier"
 */
public static Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> createGrouperAccumulatorPair(
    final GroupByQuery query,
    @Nullable final GroupByQuery subquery,
    final GroupByQueryConfig config,
    final Supplier<ByteBuffer> bufferSupplier,
    @Nullable final ReferenceCountingResourceHolder<ByteBuffer> combineBufferHolder,
    final int concurrencyHint,
    final LimitedTemporaryStorage temporaryStorage,
    final ObjectMapper spillMapper,
    @Nullable final ListeningExecutorService grouperSorter,
    final int priority,
    final boolean hasQueryTimeout,
    final long queryTimeoutAt,
    final int mergeBufferSize
) {
    // concurrencyHint >= 1 for concurrent groupers, -1 for single-threaded
    Preconditions.checkArgument(concurrencyHint >= 1 || concurrencyHint == -1, "invalid concurrencyHint");
    if (concurrencyHint >= 1) {
        Preconditions.checkNotNull(grouperSorter, "grouperSorter executor must be provided");
    }
    // See method-level javadoc; we go into combining mode if there is no subquery.
    final boolean combining = subquery == null;
    final List<ColumnType> valueTypes = DimensionHandlerUtils.getValueTypesFromDimensionSpecs(query.getDimensions());
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final boolean includeTimestamp = query.getResultRowHasTimestamp();
    final ThreadLocal<ResultRow> columnSelectorRow = new ThreadLocal<>();
    ColumnSelectorFactory columnSelectorFactory = createResultRowBasedColumnSelectorFactory(combining ? query : subquery, columnSelectorRow::get, RowSignature.Finalization.UNKNOWN);
    // Apply virtual columns if we are in subquery (non-combining) mode.
    if (!combining) {
        columnSelectorFactory = query.getVirtualColumns().wrap(columnSelectorFactory);
    }
    final boolean willApplyLimitPushDown = query.isApplyLimitPushDown();
    final DefaultLimitSpec limitSpec = willApplyLimitPushDown ? (DefaultLimitSpec) query.getLimitSpec() : null;
    boolean sortHasNonGroupingFields = false;
    if (willApplyLimitPushDown) {
        sortHasNonGroupingFields = DefaultLimitSpec.sortingOrderHasNonGroupingFields(limitSpec, query.getDimensions());
    }
    final AggregatorFactory[] aggregatorFactories;
    if (combining) {
        aggregatorFactories = query.getAggregatorSpecs().stream().map(AggregatorFactory::getCombiningFactory).toArray(AggregatorFactory[]::new);
    } else {
        aggregatorFactories = query.getAggregatorSpecs().toArray(new AggregatorFactory[0]);
    }
    final Grouper.KeySerdeFactory<RowBasedKey> keySerdeFactory = new RowBasedKeySerdeFactory(includeTimestamp, query.getContextSortByDimsFirst(), query.getDimensions(), querySpecificConfig.getMaxMergingDictionarySize() / (concurrencyHint == -1 ? 1 : concurrencyHint), valueTypes, aggregatorFactories, limitSpec);
    final Grouper<RowBasedKey> grouper;
    if (concurrencyHint == -1) {
        grouper = new SpillingGrouper<>(bufferSupplier, keySerdeFactory, columnSelectorFactory, aggregatorFactories, querySpecificConfig.getBufferGrouperMaxSize(), querySpecificConfig.getBufferGrouperMaxLoadFactor(), querySpecificConfig.getBufferGrouperInitialBuckets(), temporaryStorage, spillMapper, true, limitSpec, sortHasNonGroupingFields, mergeBufferSize);
    } else {
        final Grouper.KeySerdeFactory<RowBasedKey> combineKeySerdeFactory = new RowBasedKeySerdeFactory(
            includeTimestamp,
            query.getContextSortByDimsFirst(),
            query.getDimensions(),
            // use entire dictionary space for combining key serde
            querySpecificConfig.getMaxMergingDictionarySize(),
            valueTypes,
            aggregatorFactories,
            limitSpec
        );
        grouper = new ConcurrentGrouper<>(querySpecificConfig, bufferSupplier, combineBufferHolder, keySerdeFactory, combineKeySerdeFactory, columnSelectorFactory, aggregatorFactories, temporaryStorage, spillMapper, concurrencyHint, limitSpec, sortHasNonGroupingFields, grouperSorter, priority, hasQueryTimeout, queryTimeoutAt);
    }
    final int keySize = includeTimestamp ? query.getDimensions().size() + 1 : query.getDimensions().size();
    final ValueExtractFunction valueExtractFn = makeValueExtractFunction(query, combining, includeTimestamp, columnSelectorFactory, valueTypes);
    final Predicate<ResultRow> rowPredicate;
    if (combining) {
        // Filters are not applied in combining mode.
        rowPredicate = row -> true;
    } else {
        rowPredicate = getResultRowPredicate(query, subquery);
    }
    final Accumulator<AggregateResult, ResultRow> accumulator = (priorResult, row) -> {
        BaseQuery.checkInterrupted();
        if (priorResult != null && !priorResult.isOk()) {
            // Pass-through error returns without doing more work.
            return priorResult;
        }
        if (!grouper.isInitialized()) {
            grouper.init();
        }
        if (!rowPredicate.test(row)) {
            return AggregateResult.ok();
        }
        columnSelectorRow.set(row);
        final Comparable[] key = new Comparable[keySize];
        valueExtractFn.apply(row, key);
        final AggregateResult aggregateResult = grouper.aggregate(new RowBasedKey(key));
        columnSelectorRow.set(null);
        return aggregateResult;
    };
    return new Pair<>(grouper, accumulator);
}
Also used : Arrays(java.util.Arrays) Comparators(org.apache.druid.java.util.common.guava.Comparators) IntArrayUtils(org.apache.druid.common.utils.IntArrayUtils) DimensionHandlerUtils(org.apache.druid.segment.DimensionHandlerUtils) ColumnValueSelector(org.apache.druid.segment.ColumnValueSelector) AllGranularity(org.apache.druid.java.util.common.granularity.AllGranularity) IndexedInts(org.apache.druid.segment.data.IndexedInts) ByteBuffer(java.nio.ByteBuffer) Pair(org.apache.druid.java.util.common.Pair) DefaultLimitSpec(org.apache.druid.query.groupby.orderby.DefaultLimitSpec) BaseFloatColumnValueSelector(org.apache.druid.segment.BaseFloatColumnValueSelector) OrderByColumnSpec(org.apache.druid.query.groupby.orderby.OrderByColumnSpec) ColumnSelectorFactory(org.apache.druid.segment.ColumnSelectorFactory) RowAdapter(org.apache.druid.segment.RowAdapter) ColumnSelectorStrategyFactory(org.apache.druid.query.dimension.ColumnSelectorStrategyFactory) JsonValue(com.fasterxml.jackson.annotation.JsonValue) GroupingAggregatorFactory(org.apache.druid.query.aggregation.GroupingAggregatorFactory) BufferComparator(org.apache.druid.query.groupby.epinephelinae.Grouper.BufferComparator) Object2IntOpenHashMap(it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap) IAE(org.apache.druid.java.util.common.IAE) ToLongFunction(java.util.function.ToLongFunction) Longs(com.google.common.primitives.Longs) RowBasedColumnSelectorFactory(org.apache.druid.segment.RowBasedColumnSelectorFactory) ResultRow(org.apache.druid.query.groupby.ResultRow) Predicate(java.util.function.Predicate) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) ValueType(org.apache.druid.segment.column.ValueType) Collectors(java.util.stream.Collectors) List(java.util.List) ColumnCapabilitiesImpl(org.apache.druid.segment.column.ColumnCapabilitiesImpl) BooleanValueMatcher(org.apache.druid.segment.filter.BooleanValueMatcher) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) ColumnCapabilities(org.apache.druid.segment.column.ColumnCapabilities) BaseDoubleColumnValueSelector(org.apache.druid.segment.BaseDoubleColumnValueSelector) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Accumulator(org.apache.druid.java.util.common.guava.Accumulator) IntStream(java.util.stream.IntStream) ColumnSelectorPlus(org.apache.druid.query.ColumnSelectorPlus) ComparableList(org.apache.druid.segment.data.ComparableList) Supplier(com.google.common.base.Supplier) BaseQuery(org.apache.druid.query.BaseQuery) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Interval(org.joda.time.Interval) SettableSupplier(org.apache.druid.common.guava.SettableSupplier) ColumnSelectorStrategy(org.apache.druid.query.dimension.ColumnSelectorStrategy) StringComparators(org.apache.druid.query.ordering.StringComparators) ComparableStringArray(org.apache.druid.segment.data.ComparableStringArray) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) DimensionSelector(org.apache.druid.segment.DimensionSelector) Nullable(javax.annotation.Nullable) ValueMatcher(org.apache.druid.query.filter.ValueMatcher) ColumnInspector(org.apache.druid.segment.ColumnInspector) StringComparator(org.apache.druid.query.ordering.StringComparator) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) DateTime(org.joda.time.DateTime) Ints(com.google.common.primitives.Ints) BaseLongColumnValueSelector(org.apache.druid.segment.BaseLongColumnValueSelector) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) NullHandling(org.apache.druid.common.config.NullHandling) RowSignature(org.apache.druid.segment.column.RowSignature) Closeable(java.io.Closeable) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) ColumnType(org.apache.druid.segment.column.ColumnType) Preconditions(com.google.common.base.Preconditions) BitSet(java.util.BitSet) IntArrays(it.unimi.dsi.fastutil.ints.IntArrays) Comparator(java.util.Comparator) Filters(org.apache.druid.segment.filter.Filters) ReferenceCountingResourceHolder(org.apache.druid.collections.ReferenceCountingResourceHolder) Filter(org.apache.druid.query.filter.Filter)
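
Note the pass-through at the top of the accumulator lambda: once any input produces a non-ok AggregateResult, every later input is skipped and the failure is returned unchanged, so a call to rows.accumulate(...) surfaces the first error. Below is a minimal sketch of that short-circuit shape, using a plain String where the real code uses AggregateResult; the class name and sample values are invented for illustration.

import java.util.Arrays;

import org.apache.druid.java.util.common.guava.Accumulator;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class ShortCircuitSketch {
    private static final String OK = "ok";

    public static void main(String[] args) {
        final Sequence<Integer> rows = Sequences.simple(Arrays.asList(1, 2, -1, 3));

        // Mirror the pass-through in createGrouperAccumulatorPair: once a prior
        // result is a failure, return it unchanged and do no further work.
        final Accumulator<String, Integer> accumulator = (priorResult, row) -> {
            if (!OK.equals(priorResult)) {
                return priorResult;
            }
            return row >= 0 ? OK : "row " + row + " failed";
        };

        // Prints "row -1 failed": the failure from the third input is carried
        // past the fourth input without processing it.
        System.out.println(rows.accumulate(OK, accumulator));
    }
}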

Example 8 with Accumulator

use of org.apache.druid.java.util.common.guava.Accumulator in project druid by apache.

the class GroupByRowProcessor method process.

/**
 * Processes the input sequence "rows" (output by "subquery") based on "query" and returns a {@link ResultSupplier}.
 *
 * In addition to grouping using dimensions and metrics, it will also apply filters (both DimFilter and interval
 * filters).
 *
 * The input sequence is processed synchronously with the call to this method, and result iteration happens lazily upon
 * calls to the {@link ResultSupplier}. Make sure to close it when you're done.
 */
public static ResultSupplier process(
    final GroupByQuery query,
    final GroupByQuery subquery,
    final Sequence<ResultRow> rows,
    final GroupByQueryConfig config,
    final GroupByQueryResource resource,
    final ObjectMapper spillMapper,
    final String processingTmpDir,
    final int mergeBufferSize
) {
    final Closer closeOnExit = Closer.create();
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final File temporaryStorageDirectory = new File(processingTmpDir, StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
    closeOnExit.register(temporaryStorage);
    Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, subquery, querySpecificConfig, new Supplier<ByteBuffer>() {

        @Override
        public ByteBuffer get() {
            final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
            closeOnExit.register(mergeBufferHolder);
            return mergeBufferHolder.get();
        }
    }, temporaryStorage, spillMapper, mergeBufferSize);
    final Grouper<RowBasedKey> grouper = pair.lhs;
    final Accumulator<AggregateResult, ResultRow> accumulator = pair.rhs;
    closeOnExit.register(grouper);
    final AggregateResult retVal = rows.accumulate(AggregateResult.ok(), accumulator);
    if (!retVal.isOk()) {
        throw new ResourceLimitExceededException(retVal.getReason());
    }
    return new ResultSupplier() {

        @Override
        public Sequence<ResultRow> results(@Nullable List<DimensionSpec> dimensionsToInclude) {
            return getRowsFromGrouper(query, grouper, dimensionsToInclude);
        }

        @Override
        public void close() throws IOException {
            closeOnExit.close();
        }
    };
}
Also used : Closer(org.apache.druid.java.util.common.io.Closer) Accumulator(org.apache.druid.java.util.common.guava.Accumulator) ResultRow(org.apache.druid.query.groupby.ResultRow) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) ResourceHolder(org.apache.druid.collections.ResourceHolder) RowBasedKey(org.apache.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey) ByteBuffer(java.nio.ByteBuffer) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) List(java.util.List) File(java.io.File) Nullable(javax.annotation.Nullable)
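
The deferred cleanup here leans on druid's Closer: every resource acquired while wiring up the grouper (the temporary storage, each merge buffer holder, the grouper itself) is registered on closeOnExit, and closing the returned ResultSupplier releases them all at once. Below is a minimal sketch of that registration pattern, with print statements standing in for real resources, and assuming (as with guava's Closer, on which druid's is modeled) that registered resources are closed in reverse registration order.

import java.io.Closeable;
import java.io.IOException;

import org.apache.druid.java.util.common.io.Closer;

public class CloserSketch {
    public static void main(String[] args) throws IOException {
        final Closer closeOnExit = Closer.create();

        // Register resources as they are acquired, as process() does with
        // temporaryStorage, each merge buffer holder, and the grouper.
        closeOnExit.register((Closeable) () -> System.out.println("closing temporary storage"));
        closeOnExit.register((Closeable) () -> System.out.println("closing merge buffer"));

        // One close() releases everything registered (last-registered first).
        closeOnExit.close();
    }
}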

Example 9 with Accumulator

use of org.apache.druid.java.util.common.guava.Accumulator in project druid by apache.

the class GroupByQueryHelper method createIndexAccumulatorPair.

public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(final GroupByQuery query, @Nullable final GroupByQuery subquery, final GroupByQueryConfig config) {
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final Granularity gran = query.getGranularity();
    final long timeStart = query.getIntervals().get(0).getStartMillis();
    final boolean combine = subquery == null;
    long granTimeStart = timeStart;
    if (!(Granularities.ALL.equals(gran))) {
        granTimeStart = gran.bucketStart(timeStart);
    }
    final List<AggregatorFactory> aggs;
    if (combine) {
        aggs = Lists.transform(query.getAggregatorSpecs(), new Function<AggregatorFactory, AggregatorFactory>() {

            @Override
            public AggregatorFactory apply(AggregatorFactory input) {
                return input.getCombiningFactory();
            }
        });
    } else {
        aggs = query.getAggregatorSpecs();
    }
    final List<String> dimensions = Lists.transform(query.getDimensions(), new Function<DimensionSpec, String>() {

        @Override
        public String apply(DimensionSpec input) {
            return input.getOutputName();
        }
    });
    final IncrementalIndex index;
    final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);
    // All groupBy dimensions are strings, for now.
    final List<DimensionSchema> dimensionSchemas = new ArrayList<>();
    for (DimensionSpec dimension : query.getDimensions()) {
        dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
    }
    final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
        .withDimensionsSpec(new DimensionsSpec(dimensionSchemas))
        .withMetrics(aggs.toArray(new AggregatorFactory[0]))
        .withQueryGranularity(gran)
        .withMinTimestamp(granTimeStart)
        .build();
    final AppendableIndexBuilder indexBuilder;
    if (query.getContextValue("useOffheap", false)) {
        throw new UnsupportedOperationException("The 'useOffheap' option is no longer available for groupBy v1. Please move to the newer groupBy engine, " + "which always operates off-heap, by removing any custom 'druid.query.groupBy.defaultStrategy' runtime " + "properties and 'groupByStrategy' query context parameters that you have set.");
    } else {
        indexBuilder = new OnheapIncrementalIndex.Builder();
    }
    index = indexBuilder.setIndexSchema(indexSchema).setDeserializeComplexMetrics(false).setConcurrentEventAdd(true).setSortFacts(sortResults).setMaxRowCount(querySpecificConfig.getMaxResults()).build();
    Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>() {

        @Override
        public IncrementalIndex accumulate(IncrementalIndex accumulated, T in) {
            final MapBasedRow mapBasedRow;
            if (in instanceof MapBasedRow) {
                mapBasedRow = (MapBasedRow) in;
            } else if (in instanceof ResultRow) {
                final ResultRow row = (ResultRow) in;
                mapBasedRow = row.toMapBasedRow(combine ? query : subquery);
            } else {
                throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
            }
            try {
                accumulated.add(new MapBasedInputRow(mapBasedRow.getTimestamp(), dimensions, mapBasedRow.getEvent()));
            } catch (IndexSizeExceededException e) {
                throw new ResourceLimitExceededException(e.getMessage());
            }
            return accumulated;
        }
    };
    return new Pair<>(index, accumulator);
}
Also used : Accumulator(org.apache.druid.java.util.common.guava.Accumulator) DimensionSpec(org.apache.druid.query.dimension.DimensionSpec) AppendableIndexBuilder(org.apache.druid.segment.incremental.AppendableIndexBuilder) ArrayList(java.util.ArrayList) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) Granularity(org.apache.druid.java.util.common.granularity.Granularity) StringDimensionSchema(org.apache.druid.data.input.impl.StringDimensionSchema) DimensionSchema(org.apache.druid.data.input.impl.DimensionSchema) MapBasedRow(org.apache.druid.data.input.MapBasedRow) Function(com.google.common.base.Function) ISE(org.apache.druid.java.util.common.ISE) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) Pair(org.apache.druid.java.util.common.Pair) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException)
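
The shape of the return value is the important part: a mutable container (the IncrementalIndex) paired with an Accumulator that folds each input row into it and hands the same container back. Below is a minimal runnable analogue using a List<String> in place of the IncrementalIndex, keeping the instanceof dispatch and the ISE from the real accumulator; class and variable names are invented for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.java.util.common.guava.Accumulator;
import org.apache.druid.java.util.common.guava.Sequences;

public class IndexAccumulatorSketch {
    // Same shape as createIndexAccumulatorPair: a mutable container plus an
    // accumulator that folds each input into it and returns the container.
    static Pair<List<String>, Accumulator<List<String>, Object>> createPair() {
        final List<String> index = new ArrayList<>();
        final Accumulator<List<String>, Object> accumulator = (accumulated, in) -> {
            // Dispatch on the runtime type, as the real accumulator does for
            // MapBasedRow vs. ResultRow inputs.
            if (in instanceof String) {
                accumulated.add((String) in);
            } else {
                throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
            }
            return accumulated;
        };
        return new Pair<>(index, accumulator);
    }

    public static void main(String[] args) {
        final Pair<List<String>, Accumulator<List<String>, Object>> pair = createPair();
        final List<String> result =
            Sequences.simple(Arrays.<Object>asList("a", "b")).accumulate(pair.lhs, pair.rhs);
        System.out.println(result);
    }
}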

Example 10 with Accumulator

use of org.apache.druid.java.util.common.guava.Accumulator in project druid by apache.

the class GroupByQueryHelper method createBySegmentAccumulatorPair.

public static <T> Pair<Queue, Accumulator<Queue, T>> createBySegmentAccumulatorPair() {
    // In parallel query runner multiple threads add to this queue concurrently
    Queue init = new ConcurrentLinkedQueue<>();
    Accumulator<Queue, T> accumulator = new Accumulator<Queue, T>() {

        @Override
        public Queue accumulate(Queue accumulated, T in) {
            if (in == null) {
                throw new ISE("Cannot have null result");
            }
            accumulated.offer(in);
            return accumulated;
        }
    };
    return new Pair<>(init, accumulator);
}
Also used : Accumulator(org.apache.druid.java.util.common.guava.Accumulator) ISE(org.apache.druid.java.util.common.ISE) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Queue(java.util.Queue) Pair(org.apache.druid.java.util.common.Pair)
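
Because this accumulator may be driven from several threads at once (as the comment notes, the parallel query runner adds to the queue concurrently), the initial value itself must be thread-safe; a plain LinkedList would not do. Below is a small sketch showing two threads accumulating into the shared ConcurrentLinkedQueue; the class name and sample values are invented for illustration.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.druid.java.util.common.guava.Accumulator;

public class BySegmentSketch {
    public static void main(String[] args) throws InterruptedException {
        // Same pair shape as createBySegmentAccumulatorPair, specialized to String.
        final Queue<String> queue = new ConcurrentLinkedQueue<>();
        final Accumulator<Queue<String>, String> accumulator = (accumulated, in) -> {
            accumulated.offer(in);
            return accumulated;
        };

        // Two threads funneling results into one shared accumulated value;
        // this is safe only because the queue is concurrent.
        final Thread t1 = new Thread(() -> accumulator.accumulate(queue, "segment-1"));
        final Thread t2 = new Thread(() -> accumulator.accumulate(queue, "segment-2"));
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // Both results are present, in whichever order the threads arrived.
        System.out.println(queue);
    }
}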

Aggregations

Accumulator (org.apache.druid.java.util.common.guava.Accumulator): 20 usages
ISE (org.apache.druid.java.util.common.ISE): 10 usages
List (java.util.List): 8 usages
Pair (org.apache.druid.java.util.common.Pair): 8 usages
ResourceLimitExceededException (org.apache.druid.query.ResourceLimitExceededException): 8 usages
GroupByQueryConfig (org.apache.druid.query.groupby.GroupByQueryConfig): 8 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 6 usages
ArrayList (java.util.ArrayList): 6 usages
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery): 6 usages
ByteBuffer (java.nio.ByteBuffer): 5 usages
Sequence (org.apache.druid.java.util.common.guava.Sequence): 5 usages
Yielder (org.apache.druid.java.util.common.guava.Yielder): 5 usages
YieldingAccumulator (org.apache.druid.java.util.common.guava.YieldingAccumulator): 5 usages
ResultRow (org.apache.druid.query.groupby.ResultRow): 5 usages
ImmutableList (com.google.common.collect.ImmutableList): 4 usages
HashSet (java.util.HashSet): 4 usages
Queue (java.util.Queue): 4 usages
Set (java.util.Set): 4 usages
Nullable (javax.annotation.Nullable): 4 usages
QueryRunner (org.apache.druid.query.QueryRunner): 4 usages