Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From class TimeseriesQueryRunnerTest, method testTimeseriesNoAggregators:
@Test
public void testTimeseriesNoAggregators()
{
  Granularity gran = Granularities.DAY;
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
                                .granularity(gran)
                                .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
                                .descending(descending)
                                .context(makeContext())
                                .build();
  Iterable<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
  final DateTime expectedLast = descending ? QueryRunnerTestHelper.EARLIEST : QueryRunnerTestHelper.LAST;
  Result lastResult = null;
  for (Result<TimeseriesResultValue> result : results) {
    DateTime current = result.getTimestamp();
    Assert.assertFalse(
        StringUtils.format("Timestamp[%s] > expectedLast[%s]", current, expectedLast),
        descending ? current.isBefore(expectedLast) : current.isAfter(expectedLast)
    );
    Assert.assertEquals(ImmutableMap.of(), result.getValue().getBaseObject());
    lastResult = result;
  }
  Assert.assertEquals(lastResult.toString(), expectedLast, lastResult.getTimestamp());
}
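Below is a minimal sketch, separate from the test above, showing how Granularities.DAY buckets a timestamp; this is what produces the per-day result timestamps the test iterates over. It assumes only the standard java-util helpers DateTimes.of and Granularity.bucketStart.

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.joda.time.DateTime;

public class GranularityBucketSketch
{
  public static void main(String[] args)
  {
    Granularity day = Granularities.DAY;
    DateTime t = DateTimes.of("2020-01-15T13:45:00Z");
    // bucketStart truncates the timestamp to the start of its DAY bucket.
    DateTime bucketStart = day.bucketStart(t);
    System.out.println(bucketStart); // expected: 2020-01-15T00:00:00.000Z
  }
}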
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From class AbstractBatchIndexTask, method tryTimeChunkLock:
protected boolean tryTimeChunkLock(TaskActionClient client, List<Interval> intervals) throws IOException
{
  // The given intervals are first converted to align with segment granularity. This is because,
  // when an overwriting task finds a version for a given input row, it expects the interval
  // associated with each version to be equal to or larger than the time bucket where the input row falls.
  // See ParallelIndexSupervisorTask.findVersion().
  final Iterator<Interval> intervalIterator;
  final Granularity segmentGranularity = getSegmentGranularity();
  if (segmentGranularity == null) {
    intervalIterator = JodaUtils.condenseIntervals(intervals).iterator();
  } else {
    IntervalsByGranularity intervalsByGranularity = new IntervalsByGranularity(intervals, segmentGranularity);
    // The following calls a condense variant that does not materialize the intervals:
    intervalIterator = JodaUtils.condensedIntervalsIterator(intervalsByGranularity.granularityIntervalsIterator());
  }
  // Intervals are already condensed to avoid creating too many locks.
  // Intervals are also sorted, so it's safe to compare only the previous and current intervals for dedup.
  Interval prev = null;
  int locksAcquired = 0;
  while (intervalIterator.hasNext()) {
    final Interval cur = intervalIterator.next();
    if (prev != null && cur.equals(prev)) {
      continue;
    }
    if (maxAllowedLockCount >= 0 && locksAcquired >= maxAllowedLockCount) {
      throw new MaxAllowedLocksExceededException(maxAllowedLockCount);
    }
    prev = cur;
    final TaskLock lock = client.submit(new TimeChunkLockTryAcquireAction(TaskLockType.EXCLUSIVE, cur));
    if (lock == null) {
      return false;
    }
    if (lock.isRevoked()) {
      throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", cur));
    }
    locksAcquired++;
  }
  return true;
}
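Below is a minimal sketch, separate from the task code, of the alignment step the comment above describes: an arbitrary input interval expands to full segment-granularity buckets, each equal to or larger than any time chunk an input row can fall into. It assumes only Granularity.getIterable and Intervals.of from the java-util API.

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.joda.time.Interval;

public class SegmentGranularityAlignmentSketch
{
  public static void main(String[] args)
  {
    Granularity segmentGranularity = Granularities.DAY;
    // An input interval that does not start or end on a day boundary.
    Interval input = Intervals.of("2020-01-01T06:00:00Z/2020-01-03T06:00:00Z");
    // getIterable yields the DAY buckets overlapping the input; each bucket is a full UTC day,
    // so it covers any finer time chunk a row inside it can fall into.
    for (Interval bucket : segmentGranularity.getIterable(input)) {
      System.out.println(bucket);
    }
    // expected: the three day buckets for Jan 1, Jan 2, and Jan 3, 2020.
  }
}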
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From class GranularityPathSpecTest, method testBackwardCompatiblePeriodSegmentGranularitySerialization:
@Test
public void testBackwardCompatiblePeriodSegmentGranularitySerialization() throws JsonProcessingException
{
  final PeriodGranularity pt2S = new PeriodGranularity(new Period("PT2S"), null, DateTimeZone.UTC);
  Assert.assertNotEquals("\"SECOND\"", jsonMapper.writeValueAsString(pt2S));
  final Granularity pt1S = Granularities.SECOND;
  Assert.assertEquals("\"SECOND\"", jsonMapper.writeValueAsString(pt1S));
}
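Below is a minimal sketch, not part of the test class, contrasting the custom PT2S granularity with the standard SECOND granularity at bucketing time; this is why the two must not share the "SECOND" JSON form. The epoch-aligned default origin for PeriodGranularity is an assumption here.

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class PeriodGranularitySketch
{
  public static void main(String[] args)
  {
    Granularity pt2s = new PeriodGranularity(new Period("PT2S"), null, DateTimeZone.UTC);
    Granularity second = Granularities.SECOND;
    // With the assumed epoch-aligned origin, PT2S buckets to even seconds while SECOND buckets to every second.
    System.out.println(pt2s.bucketStart(DateTimes.of("2020-01-01T00:00:03Z")));   // expected: 2020-01-01T00:00:02.000Z
    System.out.println(second.bucketStart(DateTimes.of("2020-01-01T00:00:03Z"))); // expected: 2020-01-01T00:00:03.000Z
  }
}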
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From class GranularityPathSpecTest, method testSetDataGranularity:
@Test
public void testSetDataGranularity()
{
  Granularity granularity = Granularities.DAY;
  granularityPathSpec.setDataGranularity(granularity);
  Assert.assertEquals(granularity, granularityPathSpec.getDataGranularity());
}
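Below is a minimal, hypothetical test sketch (it assumes the standard Granularity.fromString helper used for string-based granularity configuration) showing that a string such as "day" resolves to the same standard granularity the setter test above asserts on.

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.junit.Assert;
import org.junit.Test;

public class DataGranularityFromStringSketch
{
  @Test
  public void testFromStringMatchesConstant()
  {
    // "day" in a spec resolves to the same standard granularity as Granularities.DAY.
    Granularity fromString = Granularity.fromString("day");
    Assert.assertEquals(Granularities.DAY, fromString);
  }
}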
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From class TopNQueryQueryToolChest, method getCacheStrategy:
@Override
public CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> getCacheStrategy(final TopNQuery query)
{
  return new CacheStrategy<Result<TopNResultValue>, Object, TopNQuery>()
  {
    private final List<AggregatorFactory> aggs = Lists.newArrayList(query.getAggregatorSpecs());

    private final List<PostAggregator> postAggs = AggregatorUtil.pruneDependentPostAgg(
        query.getPostAggregatorSpecs(),
        query.getTopNMetricSpec().getMetricName(query.getDimensionSpec())
    );

    @Override
    public boolean isCacheable(TopNQuery query, boolean willMergeRunners)
    {
      return true;
    }

    @Override
    public byte[] computeCacheKey(TopNQuery query)
    {
      final CacheKeyBuilder builder = new CacheKeyBuilder(TOPN_QUERY)
          .appendCacheable(query.getDimensionSpec())
          .appendCacheable(query.getTopNMetricSpec())
          .appendInt(query.getThreshold())
          .appendCacheable(query.getGranularity())
          .appendCacheable(query.getDimensionsFilter())
          .appendCacheables(query.getAggregatorSpecs())
          .appendCacheable(query.getVirtualColumns());
      final List<PostAggregator> postAggregators = prunePostAggregators(query);
      if (!postAggregators.isEmpty()) {
        // Append post aggregators only when they are used as sort keys.
        // Note that appending an empty list produces a different cache key from not appending it.
        builder.appendCacheablesIgnoringOrder(postAggregators);
      }
      return builder.build();
    }

    @Override
    public byte[] computeResultLevelCacheKey(TopNQuery query)
    {
      final CacheKeyBuilder builder = new CacheKeyBuilder(TOPN_QUERY)
          .appendCacheable(query.getDimensionSpec())
          .appendCacheable(query.getTopNMetricSpec())
          .appendInt(query.getThreshold())
          .appendCacheable(query.getGranularity())
          .appendCacheable(query.getDimensionsFilter())
          .appendCacheables(query.getAggregatorSpecs())
          .appendCacheable(query.getVirtualColumns())
          .appendCacheables(query.getPostAggregatorSpecs());
      return builder.build();
    }

    @Override
    public TypeReference<Object> getCacheObjectClazz()
    {
      return OBJECT_TYPE_REFERENCE;
    }

    @Override
    public Function<Result<TopNResultValue>, Object> prepareForCache(boolean isResultLevelCache)
    {
      return new Function<Result<TopNResultValue>, Object>()
      {
        private final String[] aggFactoryNames = extractFactoryName(query.getAggregatorSpecs());

        @Override
        public Object apply(final Result<TopNResultValue> input)
        {
          List<DimensionAndMetricValueExtractor> results = Lists.newArrayList(input.getValue());
          final List<Object> retVal = Lists.newArrayListWithCapacity(results.size() + 1);
          // Make sure to preserve timezone information when caching results.
          retVal.add(input.getTimestamp().getMillis());
          for (DimensionAndMetricValueExtractor result : results) {
            List<Object> vals = Lists.newArrayListWithCapacity(aggFactoryNames.length + 2);
            vals.add(result.getDimensionValue(query.getDimensionSpec().getOutputName()));
            for (String aggName : aggFactoryNames) {
              vals.add(result.getMetric(aggName));
            }
            if (isResultLevelCache) {
              for (PostAggregator postAgg : query.getPostAggregatorSpecs()) {
                vals.add(result.getMetric(postAgg.getName()));
              }
            }
            retVal.add(vals);
          }
          return retVal;
        }
      };
    }

    @Override
    public Function<Object, Result<TopNResultValue>> pullFromCache(boolean isResultLevelCache)
    {
      return new Function<Object, Result<TopNResultValue>>()
      {
        private final Granularity granularity = query.getGranularity();

        @Override
        public Result<TopNResultValue> apply(Object input)
        {
          List<Object> results = (List<Object>) input;
          List<Map<String, Object>> retVal = Lists.newArrayListWithCapacity(results.size());
          Iterator<Object> inputIter = results.iterator();
          DateTime timestamp = granularity.toDateTime(((Number) inputIter.next()).longValue());
          while (inputIter.hasNext()) {
            List<Object> result = (List<Object>) inputIter.next();
            final Map<String, Object> vals = Maps.newLinkedHashMap();
            Iterator<Object> resultIter = result.iterator();
            // Must convert the generic Jackson-deserialized type into the proper type.
            vals.put(
                query.getDimensionSpec().getOutputName(),
                DimensionHandlerUtils.convertObjectToType(resultIter.next(), query.getDimensionSpec().getOutputType())
            );
            CacheStrategy.fetchAggregatorsFromCache(
                aggs,
                resultIter,
                isResultLevelCache,
                (aggName, aggPos, aggValueObject) -> {
                  vals.put(aggName, aggValueObject);
                }
            );
            if (isResultLevelCache) {
              Iterator<PostAggregator> postItr = query.getPostAggregatorSpecs().iterator();
              while (postItr.hasNext() && resultIter.hasNext()) {
                vals.put(postItr.next().getName(), resultIter.next());
              }
            } else {
              for (PostAggregator postAgg : postAggs) {
                vals.put(postAgg.getName(), postAgg.compute(vals));
              }
            }
            retVal.add(vals);
          }
          return new Result<>(timestamp, new TopNResultValue(retVal));
        }
      };
    }
  };
}
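Below is a minimal sketch, using plain collections and made-up dimension and metric names, of the flat structure the strategy above writes to the cache: the bucket timestamp in millis comes first, then one list per topN row holding the dimension value, the aggregator values in declaration order, and, for result-level cache entries, the post-aggregator values. This mirrors what prepareForCache produces and pullFromCache consumes.

import java.util.Arrays;
import java.util.List;

public class TopNCacheShapeSketch
{
  public static void main(String[] args)
  {
    List<Object> cached = Arrays.asList(
        1577836800000L,                              // bucket timestamp in millis; restored via granularity.toDateTime
        Arrays.asList("page-A", 120L, 45.0, 0.375),  // dim value, agg values, post-agg value (result-level cache)
        Arrays.asList("page-B", 80L, 10.0, 0.125)
    );
    System.out.println(cached);
  }
}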