Use of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
In the class GroupByQueryQueryToolChest, the method getCacheStrategy:
@Override
public CacheStrategy<Row, Object, GroupByQuery> getCacheStrategy(final GroupByQuery query) {
  return new CacheStrategy<Row, Object, GroupByQuery>() {
    private static final byte CACHE_STRATEGY_VERSION = 0x1;

    private final List<AggregatorFactory> aggs = query.getAggregatorSpecs();

    private final List<DimensionSpec> dims = query.getDimensions();

    @Override
    public boolean isCacheable(GroupByQuery query, boolean willMergeRunners) {
      return strategySelector.strategize(query).isCacheable(willMergeRunners);
    }

    @Override
    public byte[] computeCacheKey(GroupByQuery query) {
      return new CacheKeyBuilder(GROUPBY_QUERY)
          .appendByte(CACHE_STRATEGY_VERSION)
          .appendCacheable(query.getGranularity())
          .appendCacheable(query.getDimFilter())
          .appendCacheablesIgnoringOrder(query.getAggregatorSpecs())
          .appendCacheablesIgnoringOrder(query.getDimensions())
          .appendCacheable(query.getVirtualColumns())
          .build();
    }

    @Override
    public TypeReference<Object> getCacheObjectClazz() {
      return OBJECT_TYPE_REFERENCE;
    }

    @Override
    public Function<Row, Object> prepareForCache() {
      return new Function<Row, Object>() {
        @Override
        public Object apply(Row input) {
          if (input instanceof MapBasedRow) {
            final MapBasedRow row = (MapBasedRow) input;
            final List<Object> retVal = Lists.newArrayListWithCapacity(1 + dims.size() + aggs.size());
            retVal.add(row.getTimestamp().getMillis());
            Map<String, Object> event = row.getEvent();
            for (DimensionSpec dim : dims) {
              retVal.add(event.get(dim.getOutputName()));
            }
            for (AggregatorFactory agg : aggs) {
              retVal.add(event.get(agg.getName()));
            }
            return retVal;
          }
          throw new ISE("Don't know how to cache input rows of type[%s]", input.getClass());
        }
      };
    }

    @Override
    public Function<Object, Row> pullFromCache() {
      return new Function<Object, Row>() {
        private final Granularity granularity = query.getGranularity();

        @Override
        public Row apply(Object input) {
          Iterator<Object> results = ((List<Object>) input).iterator();
          DateTime timestamp = granularity.toDateTime(((Number) results.next()).longValue());
          Map<String, Object> event = Maps.newLinkedHashMap();
          Iterator<DimensionSpec> dimsIter = dims.iterator();
          while (dimsIter.hasNext() && results.hasNext()) {
            final DimensionSpec factory = dimsIter.next();
            event.put(factory.getOutputName(), results.next());
          }
          Iterator<AggregatorFactory> aggsIter = aggs.iterator();
          while (aggsIter.hasNext() && results.hasNext()) {
            final AggregatorFactory factory = aggsIter.next();
            event.put(factory.getName(), factory.deserialize(results.next()));
          }
          if (dimsIter.hasNext() || aggsIter.hasNext() || results.hasNext()) {
            throw new ISE(
                "Found left over objects while reading from cache!! dimsIter[%s] aggsIter[%s] results[%s]",
                dimsIter.hasNext(), aggsIter.hasNext(), results.hasNext()
            );
          }
          return new MapBasedRow(timestamp, event);
        }
      };
    }
  };
}
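The prepareForCache/pullFromCache pair above round-trips each MapBasedRow through a compact positional list: the timestamp in slot 0, then the dimension values in dimension order, then the aggregator values in aggregator order; pullFromCache reads the slots back in exactly that order and additionally runs each AggregatorFactory's deserialize on its cached value. Below is a minimal standalone sketch of that flatten/rebuild idea using only plain collections; RowCacheLayoutSketch, encodeRow and decodeRow are illustrative names, not Druid APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RowCacheLayoutSketch {
  // Illustrative only: mirrors the positional layout used by prepareForCache()/pullFromCache().
  static List<Object> encodeRow(long timestampMillis, Map<String, Object> event,
                                List<String> dimNames, List<String> aggNames) {
    final List<Object> out = new ArrayList<>(1 + dimNames.size() + aggNames.size());
    out.add(timestampMillis);              // slot 0: the row timestamp
    for (String dim : dimNames) {
      out.add(event.get(dim));             // next slots: dimension values, in dimension order
    }
    for (String agg : aggNames) {
      out.add(event.get(agg));             // remaining slots: aggregator values, in aggregator order
    }
    return out;
  }

  static Map<String, Object> decodeRow(List<Object> cached, List<String> dimNames, List<String> aggNames) {
    final Iterator<Object> it = cached.iterator();
    it.next();                             // slot 0 is the timestamp; a caller would read it separately
    final Map<String, Object> event = new LinkedHashMap<>();
    for (String dim : dimNames) {
      event.put(dim, it.next());
    }
    for (String agg : aggNames) {
      event.put(agg, it.next());           // the real code also applies AggregatorFactory.deserialize() here
    }
    return event;
  }

  public static void main(String[] args) {
    Map<String, Object> event = new LinkedHashMap<>();
    event.put("country", "NZ");
    event.put("rows", 42L);
    List<Object> cached = encodeRow(0L, event, Arrays.asList("country"), Arrays.asList("rows"));
    System.out.println(cached);            // [0, NZ, 42]
    System.out.println(decodeRow(cached, Arrays.asList("country"), Arrays.asList("rows")));
  }
}

Because the cached form is purely positional, the CACHE_STRATEGY_VERSION byte folded into computeCacheKey gives the toolchest a way to change this layout later without old cache entries being read back under the new scheme.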
Use of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
In the class TopNQueryEngine, the method query:
public Sequence<Result<TopNResultValue>> query(final TopNQuery query, final StorageAdapter adapter) {
  if (adapter == null) {
    throw new SegmentMissingException(
        "Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped."
    );
  }
  final List<Interval> queryIntervals = query.getQuerySegmentSpec().getIntervals();
  final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimensionsFilter()));
  final Granularity granularity = query.getGranularity();
  final Function<Cursor, Result<TopNResultValue>> mapFn = getMapFn(query, adapter);
  Preconditions.checkArgument(queryIntervals.size() == 1, "Can only handle a single interval, got[%s]", queryIntervals);
  return Sequences.filter(
      Sequences.map(
          adapter.makeCursors(filter, queryIntervals.get(0), query.getVirtualColumns(), granularity, query.isDescending()),
          new Function<Cursor, Result<TopNResultValue>>() {
            @Override
            public Result<TopNResultValue> apply(Cursor input) {
              log.debug("Running over cursor[%s]", adapter.getInterval(), input.getTime());
              return mapFn.apply(input);
            }
          }
      ),
      Predicates.<Result<TopNResultValue>>notNull()
  );
}
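After the null-adapter and single-interval checks, the method is a three-step pipeline: makeCursors builds a sequence of cursors for the interval, Sequences.map applies the per-cursor mapFn, and Sequences.filter discards null results. A rough plain-Java analogy of that map-then-filter-nulls shape, using streams instead of Druid's lazy Sequence API; mapFn here is a stand-in, not the real getMapFn result.

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

public class MapThenFilterSketch {
  // Stand-in for the per-cursor function; it returns null for one input to show why the notNull filter exists.
  static Integer mapFn(String cursor) {
    return cursor.endsWith("-empty") ? null : cursor.length();
  }

  public static void main(String[] args) {
    List<String> cursors = Arrays.asList("cursor-1", "cursor-empty", "cursor-2");
    List<Integer> results = cursors.stream()
        .map(MapThenFilterSketch::mapFn)   // analogous to Sequences.map(cursors, mapFn)
        .filter(Objects::nonNull)          // analogous to Sequences.filter(..., Predicates.notNull())
        .collect(Collectors.toList());
    System.out.println(results);           // [8, 8]
  }
}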
Use of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
In the class TimeseriesQueryQueryToolChest, the method getCacheStrategy:
@Override
public CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> getCacheStrategy(final TimeseriesQuery query) {
  return new CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery>() {
    private final List<AggregatorFactory> aggs = query.getAggregatorSpecs();

    @Override
    public boolean isCacheable(TimeseriesQuery query, boolean willMergeRunners) {
      return true;
    }

    @Override
    public byte[] computeCacheKey(TimeseriesQuery query) {
      return new CacheKeyBuilder(TIMESERIES_QUERY)
          .appendBoolean(query.isDescending())
          .appendBoolean(query.isSkipEmptyBuckets())
          .appendCacheable(query.getGranularity())
          .appendCacheable(query.getDimensionsFilter())
          .appendCacheablesIgnoringOrder(query.getAggregatorSpecs())
          .appendCacheable(query.getVirtualColumns())
          .build();
    }

    @Override
    public TypeReference<Object> getCacheObjectClazz() {
      return OBJECT_TYPE_REFERENCE;
    }

    @Override
    public Function<Result<TimeseriesResultValue>, Object> prepareForCache() {
      return new Function<Result<TimeseriesResultValue>, Object>() {
        @Override
        public Object apply(final Result<TimeseriesResultValue> input) {
          TimeseriesResultValue results = input.getValue();
          final List<Object> retVal = Lists.newArrayListWithCapacity(1 + aggs.size());
          retVal.add(input.getTimestamp().getMillis());
          for (AggregatorFactory agg : aggs) {
            retVal.add(results.getMetric(agg.getName()));
          }
          return retVal;
        }
      };
    }

    @Override
    public Function<Object, Result<TimeseriesResultValue>> pullFromCache() {
      return new Function<Object, Result<TimeseriesResultValue>>() {
        private final Granularity granularity = query.getGranularity();

        @Override
        public Result<TimeseriesResultValue> apply(@Nullable Object input) {
          List<Object> results = (List<Object>) input;
          Map<String, Object> retVal = Maps.newLinkedHashMap();
          Iterator<AggregatorFactory> aggsIter = aggs.iterator();
          Iterator<Object> resultIter = results.iterator();
          DateTime timestamp = granularity.toDateTime(((Number) resultIter.next()).longValue());
          while (aggsIter.hasNext() && resultIter.hasNext()) {
            final AggregatorFactory factory = aggsIter.next();
            retVal.put(factory.getName(), factory.deserialize(resultIter.next()));
          }
          return new Result<TimeseriesResultValue>(timestamp, new TimeseriesResultValue(retVal));
        }
      };
    }
  };
}
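computeCacheKey here folds the descending and skipEmptyBuckets flags, the granularity, the filter, the aggregators and the virtual columns into a single byte[]. A rough sketch of the idea behind that kind of component-wise key, using a plain ByteBuffer rather than Druid's CacheKeyBuilder; buildKey and its parameters are illustrative only and assume short components.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class CacheKeySketch {
  // Illustrative only: packs a query-type marker, a flag and two length-prefixed strings into one key,
  // so that changing any component yields a different key.
  static byte[] buildKey(byte queryTypeId, boolean descending, String granularity, String filter) {
    byte[] granBytes = granularity.getBytes(StandardCharsets.UTF_8);
    byte[] filterBytes = filter.getBytes(StandardCharsets.UTF_8);   // assumed < 128 bytes each
    return ByteBuffer.allocate(4 + granBytes.length + filterBytes.length)
        .put(queryTypeId)                               // e.g. the TIMESERIES_QUERY marker byte
        .put((byte) (descending ? 1 : 0))               // the appendBoolean(...) component
        .put((byte) granBytes.length).put(granBytes)    // length-prefixed, like appendCacheable(...)
        .put((byte) filterBytes.length).put(filterBytes)
        .array();
  }

  public static void main(String[] args) {
    byte[] key = buildKey((byte) 0x0, false, "PT1H", "country=NZ");
    System.out.println(key.length);   // 18
  }
}

The length prefixes keep adjacent components from bleeding into each other (for example, "ab" + "c" and "a" + "bc" cannot produce the same key), which is the same concern the real CacheKeyBuilder has to handle.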
Use of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
In the class QueryGranularityTest, the method testSerializeDuration:
@Test
public void testSerializeDuration() throws Exception {
  ObjectMapper mapper = new DefaultObjectMapper();

  String json = "{ \"type\": \"duration\", \"duration\": \"3600000\" }";
  Granularity gran = mapper.readValue(json, Granularity.class);
  Assert.assertEquals(new DurationGranularity(3600000, null), gran);

  json = "{ \"type\": \"duration\", \"duration\": \"5\", \"origin\": \"2012-09-01T00:00:00.002Z\" }";
  gran = mapper.readValue(json, Granularity.class);
  Assert.assertEquals(new DurationGranularity(5, 2), gran);

  DurationGranularity expected = new DurationGranularity(5, 2);
  Assert.assertEquals(expected, mapper.readValue(mapper.writeValueAsString(expected), Granularity.class));

  String illegalJson = "{ \"type\": \"duration\", \"duration\": \"0\" }";
  try {
    mapper.readValue(illegalJson, Granularity.class);
    Assert.fail();
  } catch (JsonMappingException e) {
  }
}
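The second assertion is the notable one: the granularity parsed with origin 2012-09-01T00:00:00.002Z compares equal to new DurationGranularity(5, 2), which implies the origin is kept only modulo the duration; that timestamp sits 2 ms past a 5 ms boundary. A quick check of that arithmetic with plain Joda-Time, no Druid classes involved:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class OriginModuloCheck {
  public static void main(String[] args) {
    // The epoch millis of 2012-09-01T00:00:00.002Z, taken modulo the 5 ms duration, leave an offset of 2.
    long originMillis = new DateTime("2012-09-01T00:00:00.002Z", DateTimeZone.UTC).getMillis();
    System.out.println(originMillis % 5L);   // prints 2
  }
}

The final block shows that a zero duration is rejected at deserialization time, surfacing as a JsonMappingException, so the empty catch is the expected path.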
Use of io.druid.java.util.common.granularity.Granularity in project druid by druid-io.
In the class QueryGranularityTest, the method testDurationToDateTime:
@Test
public void testDurationToDateTime() throws Exception {
  final DateTime origin = new DateTime("2012-01-02T05:00:00.000-08:00");
  Granularity gran = new DurationGranularity(new Period("PT12H5M").toStandardDuration().getMillis(), origin);
  Assert.assertEquals(
      new DateTime("2012-01-01T05:00:04.123-08:00"),
      gran.toDateTime(new DateTime("2012-01-01T05:00:04.123-08:00").getMillis())
  );
}
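The assertion expects the exact input instant back, so toDateTime is converting raw millis into a DateTime rather than truncating to a bucket boundary; the input is not a whole number of PT12H5M periods away from the origin, so truncation would have produced a different instant. A quick check that the duration used here is 43,500,000 ms:

import org.joda.time.Period;

public class DurationCheck {
  public static void main(String[] args) {
    // PT12H5M = 12 hours + 5 minutes, expressed in milliseconds.
    System.out.println(new Period("PT12H5M").toStandardDuration().getMillis());   // prints 43500000
  }
}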