Example 1 with Accumulator

Use of io.druid.java.util.common.guava.Accumulator in project druid by druid-io.

The class GroupByRowProcessor, method process.

public static Sequence<Row> process(final Query queryParam, final Sequence<Row> rows, final Map<String, ValueType> rowSignature, final GroupByQueryConfig config, final GroupByQueryResource resource, final ObjectMapper spillMapper, final String processingTmpDir) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final AggregatorFactory[] aggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        aggregatorFactories[i] = query.getAggregatorSpecs().get(i);
    }
    final File temporaryStorageDirectory = new File(processingTmpDir, String.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final List<Interval> queryIntervals = query.getIntervals();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter()));
    final SettableSupplier<Row> rowSupplier = new SettableSupplier<>();
    final RowBasedColumnSelectorFactory columnSelectorFactory = RowBasedColumnSelectorFactory.create(rowSupplier, rowSignature);
    final ValueMatcher filterMatcher = filter == null ? BooleanValueMatcher.of(true) : filter.makeMatcher(columnSelectorFactory);
    final FilteredSequence<Row> filteredSequence = new FilteredSequence<>(rows, new Predicate<Row>() {

        @Override
        public boolean apply(Row input) {
            boolean inInterval = false;
            DateTime rowTime = input.getTimestamp();
            for (Interval queryInterval : queryIntervals) {
                if (queryInterval.contains(rowTime)) {
                    inInterval = true;
                    break;
                }
            }
            if (!inInterval) {
                return false;
            }
            rowSupplier.set(input);
            return filterMatcher.matches();
        }
    });
    return new BaseSequence<>(new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {

        @Override
        public CloseableGrouperIterator<RowBasedKey, Row> make() {
            // This contains all closeable objects, which are closed when the returned iterator has iterated over all the elements,
            // or an exception is thrown. The objects are closed in reverse order.
            final List<Closeable> closeOnExit = Lists.newArrayList();
            try {
                final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                closeOnExit.add(temporaryStorage);
                Pair<Grouper<RowBasedKey>, Accumulator<Grouper<RowBasedKey>, Row>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, true, rowSignature, querySpecificConfig, new Supplier<ByteBuffer>() {

                    @Override
                    public ByteBuffer get() {
                        final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
                        closeOnExit.add(mergeBufferHolder);
                        return mergeBufferHolder.get();
                    }
                }, -1, temporaryStorage, spillMapper, aggregatorFactories);
                final Grouper<RowBasedKey> grouper = pair.lhs;
                final Accumulator<Grouper<RowBasedKey>, Row> accumulator = pair.rhs;
                closeOnExit.add(grouper);
                final Grouper<RowBasedKey> retVal = filteredSequence.accumulate(grouper, accumulator);
                if (retVal != grouper) {
                    throw GroupByQueryHelper.throwAccumulationResourceLimitExceededException();
                }
                return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {

                    @Override
                    public void close() throws IOException {
                        for (Closeable closeable : Lists.reverse(closeOnExit)) {
                            CloseQuietly.close(closeable);
                        }
                    }
                });
            } catch (Throwable e) {
                // Exception caught while setting up the iterator; release resources.
                for (Closeable closeable : Lists.reverse(closeOnExit)) {
                    CloseQuietly.close(closeable);
                }
                throw e;
            }
        }

        @Override
        public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
            iterFromMake.close();
        }
    });
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) Closeable(java.io.Closeable) RowBasedColumnSelectorFactory(io.druid.query.groupby.RowBasedColumnSelectorFactory) DateTime(org.joda.time.DateTime) GroupByQuery(io.druid.query.groupby.GroupByQuery) List(java.util.List) Supplier(com.google.common.base.Supplier) SettableSupplier(io.druid.common.guava.SettableSupplier) Pair(io.druid.java.util.common.Pair) BooleanValueMatcher(io.druid.segment.filter.BooleanValueMatcher) ValueMatcher(io.druid.query.filter.ValueMatcher) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) FilteredSequence(io.druid.java.util.common.guava.FilteredSequence) RowBasedKey(io.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) ByteBuffer(java.nio.ByteBuffer) BaseSequence(io.druid.java.util.common.guava.BaseSequence) Filter(io.druid.query.filter.Filter) Row(io.druid.data.input.Row) File(java.io.File) Interval(org.joda.time.Interval)
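
All five examples on this page implement the same single-method contract: Accumulator.accumulate(accumulated, in) folds the next input into the running value and returns the new running value, and Sequence.accumulate(initValue, accumulator) drives that fold over every element. As a minimal, self-contained sketch of the pattern (the SumExample class and the summing logic are illustrative, not from the Druid codebase):

import io.druid.java.util.common.guava.Accumulator;
import io.druid.java.util.common.guava.Sequence;
import io.druid.java.util.common.guava.Sequences;
import java.util.Arrays;

public class SumExample {

    public static void main(String[] args) {
        // A fold step: combine the running total with the next element, return the new total.
        Accumulator<Long, Integer> sum = new Accumulator<Long, Integer>() {

            @Override
            public Long accumulate(Long accumulated, Integer in) {
                return accumulated + in;
            }
        };
        Sequence<Integer> sequence = Sequences.simple(Arrays.asList(1, 2, 3));
        // Threads the accumulated value through every element; prints 6.
        System.out.println(sequence.accumulate(0L, sum));
    }
}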

Example 2 with Accumulator

Use of io.druid.java.util.common.guava.Accumulator in project druid by druid-io.

The class GroupByQueryHelper, method createIndexAccumulatorPair.

public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(final GroupByQuery query, final GroupByQueryConfig config, StupidPool<ByteBuffer> bufferPool, final boolean combine) {
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final Granularity gran = query.getGranularity();
    final long timeStart = query.getIntervals().get(0).getStartMillis();
    long granTimeStart = timeStart;
    if (!(Granularities.ALL.equals(gran))) {
        granTimeStart = gran.bucketStart(new DateTime(timeStart)).getMillis();
    }
    final List<AggregatorFactory> aggs;
    if (combine) {
        aggs = Lists.transform(query.getAggregatorSpecs(), new Function<AggregatorFactory, AggregatorFactory>() {

            @Override
            public AggregatorFactory apply(AggregatorFactory input) {
                return input.getCombiningFactory();
            }
        });
    } else {
        aggs = query.getAggregatorSpecs();
    }
    final List<String> dimensions = Lists.transform(query.getDimensions(), new Function<DimensionSpec, String>() {

        @Override
        public String apply(DimensionSpec input) {
            return input.getOutputName();
        }
    });
    final IncrementalIndex index;
    final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);
    // All groupBy dimensions are strings, for now.
    final List<DimensionSchema> dimensionSchemas = Lists.newArrayList();
    for (DimensionSpec dimension : query.getDimensions()) {
        dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
    }
    final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder().withDimensionsSpec(new DimensionsSpec(dimensionSchemas, null, null)).withMetrics(aggs.toArray(new AggregatorFactory[aggs.size()])).withQueryGranularity(gran).withMinTimestamp(granTimeStart).build();
    if (query.getContextValue("useOffheap", false)) {
        index = new OffheapIncrementalIndex(indexSchema, false, true, sortResults, querySpecificConfig.getMaxResults(), bufferPool);
    } else {
        index = new OnheapIncrementalIndex(indexSchema, false, true, sortResults, querySpecificConfig.getMaxResults());
    }
    Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>() {

        @Override
        public IncrementalIndex accumulate(IncrementalIndex accumulated, T in) {
            if (in instanceof MapBasedRow) {
                try {
                    MapBasedRow row = (MapBasedRow) in;
                    accumulated.add(new MapBasedInputRow(row.getTimestamp(), dimensions, row.getEvent()));
                } catch (IndexSizeExceededException e) {
                    throw new ResourceLimitExceededException(e.getMessage());
                }
            } else {
                throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
            }
            return accumulated;
        }
    };
    return new Pair<>(index, accumulator);
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) DimensionSpec(io.druid.query.dimension.DimensionSpec) OffheapIncrementalIndex(io.druid.segment.incremental.OffheapIncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) Granularity(io.druid.java.util.common.granularity.Granularity) StringDimensionSchema(io.druid.data.input.impl.StringDimensionSchema) DimensionSchema(io.druid.data.input.impl.DimensionSchema) DateTime(org.joda.time.DateTime) MapBasedRow(io.druid.data.input.MapBasedRow) Function(com.google.common.base.Function) ISE(io.druid.java.util.common.ISE) MapBasedInputRow(io.druid.data.input.MapBasedInputRow) IncrementalIndexSchema(io.druid.segment.incremental.IncrementalIndexSchema) Pair(io.druid.java.util.common.Pair) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) ResourceLimitExceededException(io.druid.query.ResourceLimitExceededException) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) IndexSizeExceededException(io.druid.segment.incremental.IndexSizeExceededException)
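
A hedged sketch of how callers typically consume this pair: seed Sequence.accumulate with the fresh index and let the accumulator feed every row into it. The rows parameter and the makeIndex wrapper are assumptions for illustration; check your Druid version for the real call site.

// Sketch: fold a sequence of result rows into a fresh IncrementalIndex.
public static IncrementalIndex makeIndex(GroupByQuery query, GroupByQueryConfig config, StupidPool<ByteBuffer> bufferPool, Sequence<Row> rows) {
    Pair<IncrementalIndex, Accumulator<IncrementalIndex, Row>> pair = GroupByQueryHelper.createIndexAccumulatorPair(query, config, bufferPool, true);
    // accumulate() returns the same index instance after every row has been added.
    return rows.accumulate(pair.lhs, pair.rhs);
}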

Example 3 with Accumulator

Use of io.druid.java.util.common.guava.Accumulator in project druid by druid-io.

The class SpecificSegmentQueryRunnerTest, method testRetry.

@Test
public void testRetry() throws Exception {
    final ObjectMapper mapper = new DefaultObjectMapper();
    SegmentDescriptor descriptor = new SegmentDescriptor(new Interval("2012-01-01T00:00:00Z/P1D"), "version", 0);
    final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(new QueryRunner() {

        @Override
        public Sequence run(Query query, Map responseContext) {
            return new Sequence() {

                @Override
                public Object accumulate(Object initValue, Accumulator accumulator) {
                    throw new SegmentMissingException("FAILSAUCE");
                }

                @Override
                public Yielder<Object> toYielder(Object initValue, YieldingAccumulator accumulator) {
                    throw new SegmentMissingException("FAILSAUCE");
                }
            };
        }
    }, new SpecificSegmentSpec(descriptor));
    // from accumulate
    Map<String, Object> responseContext = Maps.newHashMap();
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("foo").granularity(Granularities.ALL).intervals(ImmutableList.of(new Interval("2012-01-01T00:00:00Z/P1D"))).aggregators(ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("rows"))).build();
    Sequence results = queryRunner.run(query, responseContext);
    Sequences.toList(results, Lists.newArrayList());
    validate(mapper, descriptor, responseContext);
    // from toYielder
    responseContext = Maps.newHashMap();
    results = queryRunner.run(query, responseContext);
    results.toYielder(null, new YieldingAccumulator() {

        final List lists = Lists.newArrayList();

        @Override
        public Object accumulate(Object accumulated, Object in) {
            lists.add(in);
            return in;
        }
    });
    validate(mapper, descriptor, responseContext);
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) YieldingAccumulator(io.druid.java.util.common.guava.YieldingAccumulator) Yielder(io.druid.java.util.common.guava.Yielder) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) Query(io.druid.query.Query) SegmentMissingException(io.druid.segment.SegmentMissingException) Sequence(io.druid.java.util.common.guava.Sequence) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) QueryRunner(io.druid.query.QueryRunner) SegmentDescriptor(io.druid.query.SegmentDescriptor) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Map(java.util.Map) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Interval(org.joda.time.Interval) Test(org.junit.Test)
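
The test exercises both consumption paths a Sequence offers: the eager accumulate and the resumable toYielder. A hedged sketch of the yielding path in general (assuming the yield(), isDone(), get(), and next() members visible in Druid's guava package; the results sequence is taken from context):

// A yielding accumulator that suspends the sequence after every element.
YieldingAccumulator<Object, Object> oneAtATime = new YieldingAccumulator<Object, Object>() {

    @Override
    public Object accumulate(Object accumulated, Object in) {
        // Ask the sequence to pause and hand control back to the caller.
        this.yield();
        return in;
    }
};
Yielder<Object> yielder = results.toYielder(null, oneAtATime);
while (!yielder.isDone()) {
    // The element the accumulator yielded on.
    Object value = yielder.get();
    // Resume the sequence until the next yield (or the end).
    yielder = yielder.next(null);
}
// Yielders hold resources, so close the final one.
CloseQuietly.close(yielder);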

Example 4 with Accumulator

Use of io.druid.java.util.common.guava.Accumulator in project druid by druid-io.

The class GroupByQueryHelper, method createBySegmentAccumulatorPair.

public static <T> Pair<Queue, Accumulator<Queue, T>> createBySegmentAccumulatorPair() {
    // In the parallel query runner, multiple threads add to this queue concurrently
    Queue init = new ConcurrentLinkedQueue<>();
    Accumulator<Queue, T> accumulator = new Accumulator<Queue, T>() {

        @Override
        public Queue accumulate(Queue accumulated, T in) {
            if (in == null) {
                throw new ISE("Cannot have null result");
            }
            accumulated.offer(in);
            return accumulated;
        }
    };
    return new Pair<>(init, accumulator);
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) ISE(io.druid.java.util.common.ISE) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Queue(java.util.Queue) Pair(io.druid.java.util.common.Pair)
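
Because the accumulated value is a ConcurrentLinkedQueue, one pair can be shared across worker threads with no extra locking; a hedged sketch (the perSegmentSequences list and executor are assumptions for illustration):

final Pair<Queue, Accumulator<Queue, Row>> pair = GroupByQueryHelper.createBySegmentAccumulatorPair();
for (final Sequence<Row> sequence : perSegmentSequences) {
    executor.submit(new Runnable() {

        @Override
        public void run() {
            // offer() on ConcurrentLinkedQueue is thread-safe, so each worker
            // folds its own sequence into the shared queue independently.
            sequence.accumulate(pair.lhs, pair.rhs);
        }
    });
}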

Example 5 with Accumulator

Use of io.druid.java.util.common.guava.Accumulator in project druid by druid-io.

The class GroupByMergingQueryRunnerV2, method run.

@Override
public Sequence<Row> run(final Query queryParam, final Map responseContext) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    // CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION is here because realtime servers use nested mergeRunners calls
    // (one for the entire query and one for each sink). We only want the outer call to actually do merging with a
    // merge buffer, otherwise the query will allocate too many merge buffers. This is potentially sub-optimal as it
    // will involve materializing the results for each sink before starting to feed them into the outer merge buffer.
    // I'm not sure of a better way to do this without tweaking how realtime servers do queries.
    final boolean forceChainedExecution = query.getContextBoolean(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, false);
    final GroupByQuery queryForRunners = query.withOverriddenContext(ImmutableMap.<String, Object>of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true));
    if (BaseQuery.getContextBySegment(query, false) || forceChainedExecution) {
        return new ChainedExecutionQueryRunner(exec, queryWatcher, queryables).run(query, responseContext);
    }
    final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();
    final AggregatorFactory[] combiningAggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        combiningAggregatorFactories[i] = query.getAggregatorSpecs().get(i).getCombiningFactory();
    }
    final File temporaryStorageDirectory = new File(processingTmpDir, String.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));
    final int priority = BaseQuery.getContextPriority(query, 0);
    // Figure out timeoutAt time now, so we can apply the timeout to both the mergeBufferPool.take and the actual
    // query processing together.
    final Number queryTimeout = query.getContextValue(QueryContextKeys.TIMEOUT, null);
    final long timeoutAt = queryTimeout == null ? JodaUtils.MAX_INSTANT : System.currentTimeMillis() + queryTimeout.longValue();
    return new BaseSequence<>(new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {

        @Override
        public CloseableGrouperIterator<RowBasedKey, Row> make() {
            final List<ReferenceCountingResourceHolder> resources = Lists.newArrayList();
            try {
                final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                final ReferenceCountingResourceHolder<LimitedTemporaryStorage> temporaryStorageHolder = ReferenceCountingResourceHolder.fromCloseable(temporaryStorage);
                resources.add(temporaryStorageHolder);
                final ReferenceCountingResourceHolder<ByteBuffer> mergeBufferHolder;
                try {
                    // This will potentially block if there are no merge buffers left in the pool.
                    final long timeout = timeoutAt - System.currentTimeMillis();
                    if (timeout <= 0 || (mergeBufferHolder = mergeBufferPool.take(timeout)) == null) {
                        throw new TimeoutException();
                    }
                    resources.add(mergeBufferHolder);
                } catch (Exception e) {
                    throw new QueryInterruptedException(e);
                }
                Pair<Grouper<RowBasedKey>, Accumulator<Grouper<RowBasedKey>, Row>> pair = RowBasedGrouperHelper.createGrouperAccumulatorPair(query, false, null, config, Suppliers.ofInstance(mergeBufferHolder.get()), concurrencyHint, temporaryStorage, spillMapper, combiningAggregatorFactories);
                final Grouper<RowBasedKey> grouper = pair.lhs;
                final Accumulator<Grouper<RowBasedKey>, Row> accumulator = pair.rhs;
                grouper.init();
                final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> grouperHolder = ReferenceCountingResourceHolder.fromCloseable(grouper);
                resources.add(grouperHolder);
                ListenableFuture<List<Boolean>> futures = Futures.allAsList(Lists.newArrayList(Iterables.transform(queryables, new Function<QueryRunner<Row>, ListenableFuture<Boolean>>() {

                    @Override
                    public ListenableFuture<Boolean> apply(final QueryRunner<Row> input) {
                        if (input == null) {
                            throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                        }
                        ListenableFuture<Boolean> future = exec.submit(new AbstractPrioritizedCallable<Boolean>(priority) {

                            @Override
                            public Boolean call() throws Exception {
                                try (Releaser bufferReleaser = mergeBufferHolder.increment();
                                    Releaser grouperReleaser = grouperHolder.increment()) {
                                    final Object retVal = input.run(queryForRunners, responseContext).accumulate(grouper, accumulator);
                                    // Return true if OK, false if resources were exhausted.
                                    return retVal == grouper;
                                } catch (QueryInterruptedException e) {
                                    throw e;
                                } catch (Exception e) {
                                    log.error(e, "Exception with one of the sequences!");
                                    throw Throwables.propagate(e);
                                }
                            }
                        });
                        if (isSingleThreaded) {
                            waitForFutureCompletion(query, Futures.allAsList(ImmutableList.of(future)), timeoutAt - System.currentTimeMillis());
                        }
                        return future;
                    }
                })));
                if (!isSingleThreaded) {
                    waitForFutureCompletion(query, futures, timeoutAt - System.currentTimeMillis());
                }
                return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {

                    @Override
                    public void close() throws IOException {
                        for (Closeable closeable : Lists.reverse(resources)) {
                            CloseQuietly.close(closeable);
                        }
                    }
                });
            } catch (Throwable e) {
                // Exception caught while setting up the iterator; release resources.
                for (Closeable closeable : Lists.reverse(resources)) {
                    CloseQuietly.close(closeable);
                }
                throw e;
            }
        }

        @Override
        public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
            iterFromMake.close();
        }
    });
}
Also used : Accumulator(io.druid.java.util.common.guava.Accumulator) Closeable(java.io.Closeable) ChainedExecutionQueryRunner(io.druid.query.ChainedExecutionQueryRunner) Function(com.google.common.base.Function) GroupByQuery(io.druid.query.groupby.GroupByQuery) Releaser(io.druid.collections.Releaser) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) ISE(io.druid.java.util.common.ISE) TimeoutException(java.util.concurrent.TimeoutException) QueryInterruptedException(io.druid.query.QueryInterruptedException) Pair(io.druid.java.util.common.Pair) GroupByQueryConfig(io.druid.query.groupby.GroupByQueryConfig) RowBasedKey(io.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) BaseSequence(io.druid.java.util.common.guava.BaseSequence) CancellationException(java.util.concurrent.CancellationException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) QueryRunner(io.druid.query.QueryRunner) ReferenceCountingResourceHolder(io.druid.collections.ReferenceCountingResourceHolder) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Row(io.druid.data.input.Row) File(java.io.File)
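
Worth isolating from this example is the reference-counting idiom that keeps the merge buffer and grouper alive while worker threads are still accumulating: each worker pins the holder with increment() and releases it on exit, and the wrapped closeable is only closed once the last reference is gone. A minimal hedged sketch using the same ReferenceCountingResourceHolder and Releaser types (grouper, sequence, and accumulator are assumed from the context above):

// Wrap the closeable grouper; it is closed when the last reference is released.
final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> holder = ReferenceCountingResourceHolder.fromCloseable(grouper);
// In each worker thread: pin the resource for the duration of the fold.
try (Releaser releaser = holder.increment()) {
    sequence.accumulate(holder.get(), accumulator);
}
// The owner drops its own reference once the query finishes;
// only then is the grouper actually closed.
CloseQuietly.close(holder);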

Aggregations

Accumulator (io.druid.java.util.common.guava.Accumulator): 9
Pair (io.druid.java.util.common.Pair): 5
Row (io.druid.data.input.Row): 4
ISE (io.druid.java.util.common.ISE): 4
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 4
GroupByQueryConfig (io.druid.query.groupby.GroupByQueryConfig): 4
List (java.util.List): 4
GroupByQuery (io.druid.query.groupby.GroupByQuery): 3
Function (com.google.common.base.Function): 2
ImmutableList (com.google.common.collect.ImmutableList): 2
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 2
MapBasedRow (io.druid.data.input.MapBasedRow): 2
BaseSequence (io.druid.java.util.common.guava.BaseSequence): 2
Sequence (io.druid.java.util.common.guava.Sequence): 2
Yielder (io.druid.java.util.common.guava.Yielder): 2
YieldingAccumulator (io.druid.java.util.common.guava.YieldingAccumulator): 2
QueryRunner (io.druid.query.QueryRunner): 2
RowBasedColumnSelectorFactory (io.druid.query.groupby.RowBasedColumnSelectorFactory): 2
RowBasedKey (io.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.RowBasedKey): 2
SegmentMissingException (io.druid.segment.SegmentMissingException): 2