Use of org.apache.druid.query.cache.CacheKeyBuilder in project druid by druid-io.
The class GroupByQueryQueryToolChest, method getCacheStrategy:
@Override
public CacheStrategy<ResultRow, Object, GroupByQuery> getCacheStrategy(final GroupByQuery query) {
return new CacheStrategy<ResultRow, Object, GroupByQuery>() {
private static final byte CACHE_STRATEGY_VERSION = 0x1;
private final List<AggregatorFactory> aggs = query.getAggregatorSpecs();
private final List<DimensionSpec> dims = query.getDimensions();
@Override
public boolean isCacheable(GroupByQuery query, boolean willMergeRunners) {
return strategySelector.strategize(query).isCacheable(willMergeRunners);
}
@Override
public byte[] computeCacheKey(GroupByQuery query) {
CacheKeyBuilder builder = new CacheKeyBuilder(GROUPBY_QUERY)
    .appendByte(CACHE_STRATEGY_VERSION)
    .appendCacheable(query.getGranularity())
    .appendCacheable(query.getDimFilter())
    .appendCacheables(query.getAggregatorSpecs())
    .appendCacheables(query.getDimensions())
    .appendCacheable(query.getVirtualColumns());
if (query.isApplyLimitPushDown()) {
builder.appendCacheable(query.getLimitSpec());
}
return builder.build();
}
@Override
public byte[] computeResultLevelCacheKey(GroupByQuery query) {
final CacheKeyBuilder builder = new CacheKeyBuilder(GROUPBY_QUERY)
    .appendByte(CACHE_STRATEGY_VERSION)
    .appendCacheable(query.getGranularity())
    .appendCacheable(query.getDimFilter())
    .appendCacheables(query.getAggregatorSpecs())
    .appendCacheables(query.getDimensions())
    .appendCacheable(query.getVirtualColumns())
    .appendCacheable(query.getHavingSpec())
    .appendCacheable(query.getLimitSpec())
    .appendCacheables(query.getPostAggregatorSpecs());
if (query.getSubtotalsSpec() != null && !query.getSubtotalsSpec().isEmpty()) {
for (List<String> subTotalSpec : query.getSubtotalsSpec()) {
builder.appendStrings(subTotalSpec);
}
}
return builder.build();
}
@Override
public TypeReference<Object> getCacheObjectClazz() {
return OBJECT_TYPE_REFERENCE;
}
@Override
public Function<ResultRow, Object> prepareForCache(boolean isResultLevelCache) {
final boolean resultRowHasTimestamp = query.getResultRowHasTimestamp();
return new Function<ResultRow, Object>() {
@Override
public Object apply(ResultRow resultRow) {
final List<Object> retVal = new ArrayList<>(1 + dims.size() + aggs.size());
int inPos = 0;
if (resultRowHasTimestamp) {
retVal.add(resultRow.getLong(inPos++));
} else {
retVal.add(query.getUniversalTimestamp().getMillis());
}
for (int i = 0; i < dims.size(); i++) {
retVal.add(resultRow.get(inPos++));
}
for (int i = 0; i < aggs.size(); i++) {
retVal.add(resultRow.get(inPos++));
}
if (isResultLevelCache) {
for (int i = 0; i < query.getPostAggregatorSpecs().size(); i++) {
retVal.add(resultRow.get(inPos++));
}
}
return retVal;
}
};
}
@Override
public Function<Object, ResultRow> pullFromCache(boolean isResultLevelCache) {
final boolean resultRowHasTimestamp = query.getResultRowHasTimestamp();
final int dimensionStart = query.getResultRowDimensionStart();
final int aggregatorStart = query.getResultRowAggregatorStart();
final int postAggregatorStart = query.getResultRowPostAggregatorStart();
return new Function<Object, ResultRow>() {
private final Granularity granularity = query.getGranularity();
@Override
public ResultRow apply(Object input) {
Iterator<Object> results = ((List<Object>) input).iterator();
DateTime timestamp = granularity.toDateTime(((Number) results.next()).longValue());
final int size = isResultLevelCache
    ? query.getResultRowSizeWithPostAggregators()
    : query.getResultRowSizeWithoutPostAggregators();
final ResultRow resultRow = ResultRow.create(size);
if (resultRowHasTimestamp) {
resultRow.set(0, timestamp.getMillis());
}
final Iterator<DimensionSpec> dimsIter = dims.iterator();
int dimPos = 0;
while (dimsIter.hasNext() && results.hasNext()) {
final DimensionSpec dimensionSpec = dimsIter.next();
// Must convert generic Jackson-deserialized type into the proper type.
resultRow.set(dimensionStart + dimPos, DimensionHandlerUtils.convertObjectToType(results.next(), dimensionSpec.getOutputType()));
dimPos++;
}
CacheStrategy.fetchAggregatorsFromCache(aggs, results, isResultLevelCache, (aggName, aggPosition, aggValueObject) -> {
resultRow.set(aggregatorStart + aggPosition, aggValueObject);
});
if (isResultLevelCache) {
Iterator<PostAggregator> postItr = query.getPostAggregatorSpecs().iterator();
int postPos = 0;
while (postItr.hasNext() && results.hasNext()) {
// Advance the post-aggregator iterator and the output position together with the
// cached values; otherwise every value would overwrite the same result-row slot.
postItr.next();
resultRow.set(postAggregatorStart + postPos, results.next());
postPos++;
}
}
if (dimsIter.hasNext() || results.hasNext()) {
throw new ISE("Found left over objects while reading from cache!! dimsIter[%s] results[%s]", dimsIter.hasNext(), results.hasNext());
}
return resultRow;
}
};
}
};
}
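The per-segment key (computeCacheKey) covers only the query parts that change what each segment produces, while the result-level key (computeResultLevelCacheKey) additionally covers the having spec, limit spec, post-aggregators, and subtotals, which only matter after per-segment results are merged. Below is a minimal sketch (not from the Druid source) of that split for a hypothetical strategy; the EXAMPLE_TYPE_ID and EXAMPLE_KEY_VERSION bytes are placeholders, and subtotals handling is omitted for brevity.

import org.apache.druid.query.cache.CacheKeyBuilder;
import org.apache.druid.query.groupby.GroupByQuery;

public class ExampleCacheKeys
{
  // Hypothetical type id and version bytes; the real strategy uses registered constants
  // such as GROUPBY_QUERY and CACHE_STRATEGY_VERSION above.
  private static final byte EXAMPLE_TYPE_ID = 0x7F;
  private static final byte EXAMPLE_KEY_VERSION = 0x1;

  // Per-segment key: only what changes the rows each segment produces.
  static byte[] segmentLevelKey(GroupByQuery query)
  {
    return new CacheKeyBuilder(EXAMPLE_TYPE_ID)
        .appendByte(EXAMPLE_KEY_VERSION)
        .appendCacheable(query.getGranularity())
        .appendCacheable(query.getDimFilter())
        .appendCacheables(query.getAggregatorSpecs())
        .appendCacheables(query.getDimensions())
        .appendCacheable(query.getVirtualColumns())
        .build();
  }

  // Result-level key: also covers post-merge features (having, limit, post-aggregators),
  // so two queries that differ only there never share a result-level cache entry.
  static byte[] resultLevelKey(GroupByQuery query)
  {
    return new CacheKeyBuilder(EXAMPLE_TYPE_ID)
        .appendByte(EXAMPLE_KEY_VERSION)
        .appendCacheable(query.getGranularity())
        .appendCacheable(query.getDimFilter())
        .appendCacheables(query.getAggregatorSpecs())
        .appendCacheables(query.getDimensions())
        .appendCacheable(query.getVirtualColumns())
        .appendCacheable(query.getHavingSpec())
        .appendCacheable(query.getLimitSpec())
        .appendCacheables(query.getPostAggregatorSpecs())
        .build();
  }
}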
Use of org.apache.druid.query.cache.CacheKeyBuilder in project druid by druid-io.
The class TopNQueryQueryToolChest, method getCacheStrategy:
@Override
public CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> getCacheStrategy(final TopNQuery query) {
return new CacheStrategy<Result<TopNResultValue>, Object, TopNQuery>() {
private final List<AggregatorFactory> aggs = Lists.newArrayList(query.getAggregatorSpecs());
private final List<PostAggregator> postAggs = AggregatorUtil.pruneDependentPostAgg(
    query.getPostAggregatorSpecs(),
    query.getTopNMetricSpec().getMetricName(query.getDimensionSpec())
);
@Override
public boolean isCacheable(TopNQuery query, boolean willMergeRunners) {
return true;
}
@Override
public byte[] computeCacheKey(TopNQuery query) {
final CacheKeyBuilder builder = new CacheKeyBuilder(TOPN_QUERY)
    .appendCacheable(query.getDimensionSpec())
    .appendCacheable(query.getTopNMetricSpec())
    .appendInt(query.getThreshold())
    .appendCacheable(query.getGranularity())
    .appendCacheable(query.getDimensionsFilter())
    .appendCacheables(query.getAggregatorSpecs())
    .appendCacheable(query.getVirtualColumns());
final List<PostAggregator> postAggregators = prunePostAggregators(query);
if (!postAggregators.isEmpty()) {
// Append post aggregators only when they are used as sort keys.
// Note that appending an empty list produces a different cache key from not appending it.
builder.appendCacheablesIgnoringOrder(postAggregators);
}
return builder.build();
}
@Override
public byte[] computeResultLevelCacheKey(TopNQuery query) {
final CacheKeyBuilder builder = new CacheKeyBuilder(TOPN_QUERY)
    .appendCacheable(query.getDimensionSpec())
    .appendCacheable(query.getTopNMetricSpec())
    .appendInt(query.getThreshold())
    .appendCacheable(query.getGranularity())
    .appendCacheable(query.getDimensionsFilter())
    .appendCacheables(query.getAggregatorSpecs())
    .appendCacheable(query.getVirtualColumns())
    .appendCacheables(query.getPostAggregatorSpecs());
return builder.build();
}
@Override
public TypeReference<Object> getCacheObjectClazz() {
return OBJECT_TYPE_REFERENCE;
}
@Override
public Function<Result<TopNResultValue>, Object> prepareForCache(boolean isResultLevelCache) {
return new Function<Result<TopNResultValue>, Object>() {
private final String[] aggFactoryNames = extractFactoryName(query.getAggregatorSpecs());
@Override
public Object apply(final Result<TopNResultValue> input) {
List<DimensionAndMetricValueExtractor> results = Lists.newArrayList(input.getValue());
final List<Object> retVal = Lists.newArrayListWithCapacity(results.size() + 1);
// make sure to preserve timezone information when caching results
retVal.add(input.getTimestamp().getMillis());
for (DimensionAndMetricValueExtractor result : results) {
List<Object> vals = Lists.newArrayListWithCapacity(aggFactoryNames.length + 2);
vals.add(result.getDimensionValue(query.getDimensionSpec().getOutputName()));
for (String aggName : aggFactoryNames) {
vals.add(result.getMetric(aggName));
}
if (isResultLevelCache) {
for (PostAggregator postAgg : query.getPostAggregatorSpecs()) {
vals.add(result.getMetric(postAgg.getName()));
}
}
retVal.add(vals);
}
return retVal;
}
};
}
@Override
public Function<Object, Result<TopNResultValue>> pullFromCache(boolean isResultLevelCache) {
return new Function<Object, Result<TopNResultValue>>() {
private final Granularity granularity = query.getGranularity();
@Override
public Result<TopNResultValue> apply(Object input) {
List<Object> results = (List<Object>) input;
List<Map<String, Object>> retVal = Lists.newArrayListWithCapacity(results.size());
Iterator<Object> inputIter = results.iterator();
DateTime timestamp = granularity.toDateTime(((Number) inputIter.next()).longValue());
while (inputIter.hasNext()) {
List<Object> result = (List<Object>) inputIter.next();
final Map<String, Object> vals = Maps.newLinkedHashMap();
Iterator<Object> resultIter = result.iterator();
// Must convert generic Jackson-deserialized type into the proper type.
vals.put(
    query.getDimensionSpec().getOutputName(),
    DimensionHandlerUtils.convertObjectToType(resultIter.next(), query.getDimensionSpec().getOutputType())
);
CacheStrategy.fetchAggregatorsFromCache(aggs, resultIter, isResultLevelCache, (aggName, aggPos, aggValueObject) -> {
vals.put(aggName, aggValueObject);
});
if (isResultLevelCache) {
Iterator<PostAggregator> postItr = query.getPostAggregatorSpecs().iterator();
while (postItr.hasNext() && resultIter.hasNext()) {
vals.put(postItr.next().getName(), resultIter.next());
}
} else {
for (PostAggregator postAgg : postAggs) {
vals.put(postAgg.getName(), postAgg.compute(vals));
}
}
retVal.add(vals);
}
return new Result<>(timestamp, new TopNResultValue(retVal));
}
};
}
};
}
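The comment in computeCacheKey above notes that appending an empty list produces a different key from not appending it at all. That is why the sort-key post-aggregators are appended only when present, and via appendCacheablesIgnoringOrder so that their ordering cannot cause spurious cache misses. The following is a minimal sketch (not from the Druid source) of that conditional-append pattern, with a hypothetical type id byte and a hypothetical baseKeyParts parameter standing in for the rest of the key.

import java.util.List;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.cache.CacheKeyBuilder;

public class ConditionalAppendSketch
{
  private static final byte EXAMPLE_TYPE_ID = 0x7E; // hypothetical cache type id

  static byte[] keyFor(byte[] baseKeyParts, List<PostAggregator> sortKeyPostAggs)
  {
    CacheKeyBuilder builder = new CacheKeyBuilder(EXAMPLE_TYPE_ID)
        .appendByteArray(baseKeyParts);
    if (!sortKeyPostAggs.isEmpty()) {
      // Skipped entirely when empty: appending an empty list would still change the key
      // and invalidate entries for queries that never had sort-key post-aggregators.
      builder.appendCacheablesIgnoringOrder(sortKeyPostAggs);
    }
    return builder.build();
  }
}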
Use of org.apache.druid.query.cache.CacheKeyBuilder in project druid by druid-io.
The class LookupExprMacro, method apply:
@Override
public Expr apply(final List<Expr> args) {
if (args.size() != 2) {
throw new IAE("Function[%s] must have 2 arguments", name());
}
final Expr arg = args.get(0);
final Expr lookupExpr = args.get(1);
if (!lookupExpr.isLiteral() || lookupExpr.getLiteralValue() == null) {
throw new IAE("Function[%s] second argument must be a registered lookup name", name());
}
final String lookupName = lookupExpr.getLiteralValue().toString();
final RegisteredLookupExtractionFn extractionFn = new RegisteredLookupExtractionFn(
    lookupExtractorFactoryContainerProvider,
    lookupName,
    false,
    null,
    false,
    null
);
class LookupExpr extends ExprMacroTable.BaseScalarUnivariateMacroFunctionExpr {
private LookupExpr(Expr arg) {
super(FN_NAME, arg);
}
@Nonnull
@Override
public ExprEval eval(final ObjectBinding bindings) {
return ExprEval.of(extractionFn.apply(NullHandling.emptyToNullIfNeeded(arg.eval(bindings).asString())));
}
@Override
public Expr visit(Shuttle shuttle) {
return shuttle.visit(apply(shuttle.visitAll(args)));
}
@Nullable
@Override
public ExpressionType getOutputType(InputBindingInspector inspector) {
return ExpressionType.STRING;
}
@Override
public String stringify() {
return StringUtils.format("%s(%s, %s)", FN_NAME, arg.stringify(), lookupExpr.stringify());
}
@Override
public byte[] getCacheKey() {
return new CacheKeyBuilder(Exprs.LOOKUP_EXPR_CACHE_KEY).appendString(stringify()).appendCacheable(extractionFn).build();
}
}
return new LookupExpr(arg);
}
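Outside of query tool chests, the same builder is useful for any cacheable component that needs a stable byte[] key, as the lookup expression's getCacheKey above shows. Here is a minimal sketch assuming a hypothetical component and id byte (neither is part of Druid): every field that changes the component's behavior feeds into the key.

import org.apache.druid.query.cache.CacheKeyBuilder;

public class PrefixMatcherSketch
{
  private static final byte PREFIX_MATCHER_CACHE_ID = 0x7D; // hypothetical id byte

  private final String prefix;
  private final boolean caseSensitive;

  public PrefixMatcherSketch(String prefix, boolean caseSensitive)
  {
    this.prefix = prefix;
    this.caseSensitive = caseSensitive;
  }

  public byte[] getCacheKey()
  {
    // Include both the string and the flag so behaviorally different instances never collide.
    return new CacheKeyBuilder(PREFIX_MATCHER_CACHE_ID)
        .appendString(prefix)
        .appendByte(caseSensitive ? (byte) 1 : (byte) 0)
        .build();
  }
}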
Use of org.apache.druid.query.cache.CacheKeyBuilder in project druid by druid-io.
The class InDimFilter, method computeCacheKey:
private byte[] computeCacheKey() {
final Collection<String> sortedValues;
if (values instanceof SortedSet && isNaturalOrder(((SortedSet<String>) values).comparator())) {
// Avoid copying "values" when it is already in the order we need for cache key computation.
sortedValues = values;
} else {
final List<String> sortedValuesList = new ArrayList<>(values);
sortedValuesList.sort(Comparators.naturalNullsFirst());
sortedValues = sortedValuesList;
}
// Hash all values, in sorted order, as their length followed by their content.
final Hasher hasher = Hashing.sha256().newHasher();
for (String v : sortedValues) {
if (v == null) {
// Encode null as length -1, no content.
hasher.putInt(-1);
} else {
hasher.putInt(v.length());
hasher.putString(v, StandardCharsets.UTF_8);
}
}
return new CacheKeyBuilder(DimFilterUtils.IN_CACHE_ID)
    .appendString(dimension)
    .appendByte(DimFilterUtils.STRING_SEPARATOR)
    .appendByteArray(extractionFn == null ? new byte[0] : extractionFn.getCacheKey())
    .appendByte(DimFilterUtils.STRING_SEPARATOR)
    .appendByteArray(hasher.hash().asBytes())
    .build();
}
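Because an IN filter can hold a very large value set, the code above hashes the sorted values into a fixed-size SHA-256 digest and appends only the digest to the key; the length prefix keeps the encoding unambiguous, so ["ab", "c"] and ["a", "bc"] hash differently. A minimal, self-contained sketch of that hashing step (not the Druid source) follows; the digest method and its parameter are illustrative names.

import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class ValueSetDigestSketch
{
  // "sortedValues" is assumed to already be in a canonical (sorted, nulls-first) order,
  // as in InDimFilter above, so equal sets always hash to the same digest.
  static byte[] digest(Iterable<String> sortedValues)
  {
    Hasher hasher = Hashing.sha256().newHasher();
    for (String v : sortedValues) {
      if (v == null) {
        hasher.putInt(-1);                    // null marker: length -1, no content
      } else {
        hasher.putInt(v.length());            // length prefix prevents ambiguous concatenation
        hasher.putString(v, StandardCharsets.UTF_8);
      }
    }
    return hasher.hash().asBytes();           // fixed 32-byte digest, appended via appendByteArray
  }
}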