Example 46 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

The class TimeseriesQueryRunnerTest, method testTimeseriesNoAggregators.

@Test
public void testTimeseriesNoAggregators() {
    Granularity gran = Granularities.DAY;
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
                                  .granularity(gran)
                                  .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
                                  .descending(descending)
                                  .context(makeContext())
                                  .build();
    Iterable<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
    final DateTime expectedLast = descending ? QueryRunnerTestHelper.EARLIEST : QueryRunnerTestHelper.LAST;
    Result<TimeseriesResultValue> lastResult = null;
    for (Result<TimeseriesResultValue> result : results) {
        DateTime current = result.getTimestamp();
        Assert.assertFalse(StringUtils.format("Timestamp[%s] > expectedLast[%s]", current, expectedLast), descending ? current.isBefore(expectedLast) : current.isAfter(expectedLast));
        Assert.assertEquals(ImmutableMap.of(), result.getValue().getBaseObject());
        lastResult = result;
    }
    Assert.assertEquals(lastResult.toString(), expectedLast, lastResult.getTimestamp());
}
Also used: PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DateTime(org.joda.time.DateTime) Result(org.apache.druid.query.Result) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
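
The assertions above rely on Granularities.DAY assigning every row timestamp to the start of a day-aligned bucket, one result row per bucket. Below is a minimal sketch of that bucketing behavior, assuming Druid's granularity classes behave as in current druid-core; the class name and sample dates are illustrative, not from the test.

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class GranularityBucketSketch {
    public static void main(String[] args) {
        Granularity gran = Granularities.DAY;
        // Each timestamp maps to the start of the day bucket containing it.
        DateTime start = gran.bucketStart(DateTime.parse("2020-01-15T13:45:00Z"));
        System.out.println(start); // 2020-01-15T00:00:00.000Z
        // An interval can be walked one bucket at a time; a timeseries query
        // emits one result row per such bucket.
        for (Interval bucket : gran.getIterable(Intervals.of("2020-01-01/2020-01-04"))) {
            System.out.println(bucket);
        }
    }
}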

Example 47 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

The class AbstractBatchIndexTask, method tryTimeChunkLock.

protected boolean tryTimeChunkLock(TaskActionClient client, List<Interval> intervals) throws IOException {
    // The given intervals are first converted to align with the segment granularity. This is
    // because, when an overwriting task finds a version for a given input row, it expects the
    // interval associated with each version to be equal to or larger than the time bucket that
    // the input row falls in. See ParallelIndexSupervisorTask.findVersion().
    final Iterator<Interval> intervalIterator;
    final Granularity segmentGranularity = getSegmentGranularity();
    if (segmentGranularity == null) {
        intervalIterator = JodaUtils.condenseIntervals(intervals).iterator();
    } else {
        IntervalsByGranularity intervalsByGranularity = new IntervalsByGranularity(intervals, segmentGranularity);
        // The following calls a condense variant that does not materialize the intervals:
        intervalIterator = JodaUtils.condensedIntervalsIterator(intervalsByGranularity.granularityIntervalsIterator());
    }
    // Intervals are already condensed to avoid creating too many locks.
    // Intervals are also sorted and thus it's safe to compare only the previous interval and current one for dedup.
    Interval prev = null;
    int locksAcquired = 0;
    while (intervalIterator.hasNext()) {
        final Interval cur = intervalIterator.next();
        if (prev != null && cur.equals(prev)) {
            continue;
        }
        if (maxAllowedLockCount >= 0 && locksAcquired >= maxAllowedLockCount) {
            throw new MaxAllowedLocksExceededException(maxAllowedLockCount);
        }
        prev = cur;
        final TaskLock lock = client.submit(new TimeChunkLockTryAcquireAction(TaskLockType.EXCLUSIVE, cur));
        if (lock == null) {
            return false;
        }
        if (lock.isRevoked()) {
            throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", cur));
        }
        locksAcquired++;
    }
    return true;
}
Also used: TaskLock(org.apache.druid.indexing.common.TaskLock) MaxAllowedLocksExceededException(org.apache.druid.indexing.common.task.batch.MaxAllowedLocksExceededException) TimeChunkLockTryAcquireAction(org.apache.druid.indexing.common.actions.TimeChunkLockTryAcquireAction) ISE(org.apache.druid.java.util.common.ISE) LockGranularity(org.apache.druid.indexing.common.LockGranularity) Granularity(org.apache.druid.java.util.common.granularity.Granularity) IntervalsByGranularity(org.apache.druid.java.util.common.granularity.IntervalsByGranularity) Interval(org.joda.time.Interval)
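
tryTimeChunkLock acquires one lock per condensed interval, so the lock count depends on how much the inputs merge. Below is a minimal sketch of the condensing step, assuming JodaUtils.condenseIntervals merges overlapping and abutting intervals as in current Druid; the class name and dates are illustrative.

import java.util.Arrays;
import java.util.List;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.JodaUtils;
import org.joda.time.Interval;

public class CondenseIntervalsSketch {
    public static void main(String[] args) {
        List<Interval> raw = Arrays.asList(
            Intervals.of("2020-01-01/2020-01-02"),
            Intervals.of("2020-01-02/2020-01-03"), // abuts the first interval
            Intervals.of("2020-01-10/2020-01-11")
        );
        // The first two intervals merge, so only two time chunk locks would be
        // requested for these three inputs.
        List<Interval> condensed = JodaUtils.condenseIntervals(raw);
        System.out.println(condensed); // two intervals: 2020-01-01/2020-01-03 and 2020-01-10/2020-01-11
    }
}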

Example 48 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

The class GranularityPathSpecTest, method testBackwardCompatiblePeriodSegmentGranularitySerialization.

@Test
public void testBackwardCompatiblePeriodSegmentGranularitySerialization() throws JsonProcessingException {
    final PeriodGranularity pt2S = new PeriodGranularity(new Period("PT2S"), null, DateTimeZone.UTC);
    Assert.assertNotEquals("\"SECOND\"", jsonMapper.writeValueAsString(pt2S));
    final Granularity pt1S = Granularities.SECOND;
    Assert.assertEquals("\"SECOND\"", jsonMapper.writeValueAsString(pt1S));
}
Also used: PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Period(org.joda.time.Period) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Test(org.junit.Test)
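
The backward-compatibility contract being tested is that only the predefined granularities round-trip through their simple names ("SECOND", "DAY", ...), while an arbitrary PeriodGranularity such as PT2S must not collapse to one. Below is a minimal sketch of the simple-name lookup, assuming Granularity.fromString resolves names as in current Druid; the class name is illustrative.

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;

public class GranularityNameSketch {
    public static void main(String[] args) {
        // "SECOND" resolves to the predefined one-second granularity...
        Granularity second = Granularity.fromString("SECOND");
        System.out.println(second.equals(Granularities.SECOND)); // true
        // ...but a two-second period has no simple name, which is why the PT2S
        // PeriodGranularity in the test must not serialize as "SECOND".
    }
}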

Example 49 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

The class GranularityPathSpecTest, method testSetDataGranularity.

@Test
public void testSetDataGranularity() {
    Granularity granularity = Granularities.DAY;
    granularityPathSpec.setDataGranularity(granularity);
    Assert.assertEquals(granularity, granularityPathSpec.getDataGranularity());
}
Also used: Granularity(org.apache.druid.java.util.common.granularity.Granularity) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Test(org.junit.Test)

Example 50 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

The class TopNQueryQueryToolChest, method getCacheStrategy.

@Override
public CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> getCacheStrategy(final TopNQuery query) {
    return new CacheStrategy<Result<TopNResultValue>, Object, TopNQuery>() {

        private final List<AggregatorFactory> aggs = Lists.newArrayList(query.getAggregatorSpecs());

        private final List<PostAggregator> postAggs = AggregatorUtil.pruneDependentPostAgg(
            query.getPostAggregatorSpecs(),
            query.getTopNMetricSpec().getMetricName(query.getDimensionSpec())
        );

        @Override
        public boolean isCacheable(TopNQuery query, boolean willMergeRunners) {
            return true;
        }

        @Override
        public byte[] computeCacheKey(TopNQuery query) {
            final CacheKeyBuilder builder = new CacheKeyBuilder(TOPN_QUERY)
                .appendCacheable(query.getDimensionSpec())
                .appendCacheable(query.getTopNMetricSpec())
                .appendInt(query.getThreshold())
                .appendCacheable(query.getGranularity())
                .appendCacheable(query.getDimensionsFilter())
                .appendCacheables(query.getAggregatorSpecs())
                .appendCacheable(query.getVirtualColumns());
            final List<PostAggregator> postAggregators = prunePostAggregators(query);
            if (!postAggregators.isEmpty()) {
                // Append post aggregators only when they are used as sort keys.
                // Note that appending an empty list produces a different cache key from not appending it.
                builder.appendCacheablesIgnoringOrder(postAggregators);
            }
            return builder.build();
        }

        @Override
        public byte[] computeResultLevelCacheKey(TopNQuery query) {
            final CacheKeyBuilder builder = new CacheKeyBuilder(TOPN_QUERY)
                .appendCacheable(query.getDimensionSpec())
                .appendCacheable(query.getTopNMetricSpec())
                .appendInt(query.getThreshold())
                .appendCacheable(query.getGranularity())
                .appendCacheable(query.getDimensionsFilter())
                .appendCacheables(query.getAggregatorSpecs())
                .appendCacheable(query.getVirtualColumns())
                .appendCacheables(query.getPostAggregatorSpecs());
            return builder.build();
        }

        @Override
        public TypeReference<Object> getCacheObjectClazz() {
            return OBJECT_TYPE_REFERENCE;
        }

        @Override
        public Function<Result<TopNResultValue>, Object> prepareForCache(boolean isResultLevelCache) {
            return new Function<Result<TopNResultValue>, Object>() {

                private final String[] aggFactoryNames = extractFactoryName(query.getAggregatorSpecs());

                @Override
                public Object apply(final Result<TopNResultValue> input) {
                    List<DimensionAndMetricValueExtractor> results = Lists.newArrayList(input.getValue());
                    final List<Object> retVal = Lists.newArrayListWithCapacity(results.size() + 1);
                    // make sure to preserve timezone information when caching results
                    retVal.add(input.getTimestamp().getMillis());
                    for (DimensionAndMetricValueExtractor result : results) {
                        List<Object> vals = Lists.newArrayListWithCapacity(aggFactoryNames.length + 2);
                        vals.add(result.getDimensionValue(query.getDimensionSpec().getOutputName()));
                        for (String aggName : aggFactoryNames) {
                            vals.add(result.getMetric(aggName));
                        }
                        if (isResultLevelCache) {
                            for (PostAggregator postAgg : query.getPostAggregatorSpecs()) {
                                vals.add(result.getMetric(postAgg.getName()));
                            }
                        }
                        retVal.add(vals);
                    }
                    return retVal;
                }
            };
        }

        @Override
        public Function<Object, Result<TopNResultValue>> pullFromCache(boolean isResultLevelCache) {
            return new Function<Object, Result<TopNResultValue>>() {

                private final Granularity granularity = query.getGranularity();

                @Override
                public Result<TopNResultValue> apply(Object input) {
                    List<Object> results = (List<Object>) input;
                    List<Map<String, Object>> retVal = Lists.newArrayListWithCapacity(results.size());
                    Iterator<Object> inputIter = results.iterator();
                    DateTime timestamp = granularity.toDateTime(((Number) inputIter.next()).longValue());
                    while (inputIter.hasNext()) {
                        List<Object> result = (List<Object>) inputIter.next();
                        final Map<String, Object> vals = Maps.newLinkedHashMap();
                        Iterator<Object> resultIter = result.iterator();
                        // Must convert generic Jackson-deserialized type into the proper type.
                        vals.put(query.getDimensionSpec().getOutputName(), DimensionHandlerUtils.convertObjectToType(resultIter.next(), query.getDimensionSpec().getOutputType()));
                        CacheStrategy.fetchAggregatorsFromCache(aggs, resultIter, isResultLevelCache, (aggName, aggPos, aggValueObject) -> {
                            vals.put(aggName, aggValueObject);
                        });
                        if (isResultLevelCache) {
                            Iterator<PostAggregator> postItr = query.getPostAggregatorSpecs().iterator();
                            while (postItr.hasNext() && resultIter.hasNext()) {
                                vals.put(postItr.next().getName(), resultIter.next());
                            }
                        } else {
                            for (PostAggregator postAgg : postAggs) {
                                vals.put(postAgg.getName(), postAgg.compute(vals));
                            }
                        }
                        retVal.add(vals);
                    }
                    return new Result<>(timestamp, new TopNResultValue(retVal));
                }
            };
        }
    };
}
Also used: PostAggregator(org.apache.druid.query.aggregation.PostAggregator) CacheKeyBuilder(org.apache.druid.query.cache.CacheKeyBuilder) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DateTime(org.joda.time.DateTime) Result(org.apache.druid.query.Result) Function(com.google.common.base.Function) List(java.util.List) Map(java.util.Map) CacheStrategy(org.apache.druid.query.CacheStrategy)
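
Note how prepareForCache stores only the bucket timestamp's epoch millis (the "preserve timezone information" comment) and pullFromCache restores it through granularity.toDateTime, so the query's granularity supplies the time zone. Below is a minimal sketch of that restore step, assuming PeriodGranularity.toDateTime applies the granularity's zone as in current Druid; the class name and timestamps are illustrative.

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class CachedTimestampSketch {
    public static void main(String[] args) {
        long millis = DateTime.parse("2020-06-01T00:00:00+02:00").getMillis();
        // A zoned granularity restores the cached millis in its own zone...
        PeriodGranularity berlinDay = new PeriodGranularity(
            new Period("P1D"),
            null,
            DateTimeZone.forID("Europe/Berlin")
        );
        System.out.println(berlinDay.toDateTime(millis)); // 2020-06-01T00:00:00.000+02:00
        // ...while a plain UTC granularity yields the same instant in UTC.
        System.out.println(Granularities.DAY.toDateTime(millis)); // 2020-05-31T22:00:00.000Z
    }
}

Caching the raw millis rather than a formatted timestamp keeps cache entries compact and zone-agnostic; the zone lives with the query's granularity, not with the cached value.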

Aggregations

Granularity (org.apache.druid.java.util.common.granularity.Granularity)58 Interval (org.joda.time.Interval)27 ArrayList (java.util.ArrayList)22 DateTime (org.joda.time.DateTime)19 Test (org.junit.Test)16 List (java.util.List)14 Map (java.util.Map)14 HashMap (java.util.HashMap)13 Nullable (javax.annotation.Nullable)12 PeriodGranularity (org.apache.druid.java.util.common.granularity.PeriodGranularity)12 AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory)12 Period (org.joda.time.Period)11 ISE (org.apache.druid.java.util.common.ISE)8 Result (org.apache.druid.query.Result)8 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)7 ImmutableList (com.google.common.collect.ImmutableList)7 VisibleForTesting (com.google.common.annotations.VisibleForTesting)6 ClientCompactionTaskGranularitySpec (org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec)6 LockGranularity (org.apache.druid.indexing.common.LockGranularity)6 Sequence (org.apache.druid.java.util.common.guava.Sequence)6