Use of org.apache.druid.query.aggregation.AggregatorFactory in project druid by druid-io.
The class QueriesTest, method testVerifyAggregations.
@Test
public void testVerifyAggregations() {
  List<AggregatorFactory> aggFactories = Arrays.asList(
      new CountAggregatorFactory("count"),
      new DoubleSumAggregatorFactory("idx", "index"),
      new DoubleSumAggregatorFactory("rev", "revenue"));
  List<PostAggregator> postAggs = Collections.singletonList(
      new ArithmeticPostAggregator("addStuff", "+", Arrays.asList(
          new FieldAccessPostAggregator("idx", "idx"),
          new FieldAccessPostAggregator("count", "count"))));
  // Every field the post-aggregator reads ("idx", "count") is produced by an
  // aggregator above, so validation should pass without throwing.
  boolean exceptionOccurred = false;
  try {
    Queries.prepareAggregations(ImmutableList.of(), aggFactories, postAggs);
  } catch (IllegalArgumentException e) {
    exceptionOccurred = true;
  }
  Assert.assertFalse(exceptionOccurred);
}
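Queries.prepareAggregations validates that every field a post-aggregator reads is produced by an aggregator or by an earlier post-aggregator. As a minimal sketch of the failing direction, reusing aggFactories from the test above with a hypothetical undeclared field "idx2":

  // Sketch (hypothetical "idx2" reference): a post-aggregator reading a field
  // that no aggregator produces should be rejected with IllegalArgumentException.
  List<PostAggregator> badPostAggs = Collections.singletonList(
      new ArithmeticPostAggregator("addStuff", "+", Arrays.asList(
          new FieldAccessPostAggregator("idx", "idx2"),
          new FieldAccessPostAggregator("count", "count"))));
  try {
    Queries.prepareAggregations(ImmutableList.of(), aggFactories, badPostAggs);
    Assert.fail("expected IllegalArgumentException for unknown field \"idx2\"");
  } catch (IllegalArgumentException expected) {
    // validation rejected the unresolvable field reference, as expected
  }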
Use of org.apache.druid.query.aggregation.AggregatorFactory in project druid by druid-io.
The class QueriesTest, method testVerifyAggregationsMultiLevelMissingVal.
@Test
public void testVerifyAggregationsMultiLevelMissingVal() {
  List<AggregatorFactory> aggFactories = Arrays.asList(new CountAggregatorFactory("count"),
      new DoubleSumAggregatorFactory("idx", "index"), new DoubleSumAggregatorFactory("rev", "revenue"));
  // "rev2" below is not produced by any aggregator, so validation should fail.
  List<PostAggregator> postAggs = Arrays.asList(
      new ArithmeticPostAggregator("divideStuff", "/", Arrays.asList(
          new ArithmeticPostAggregator("addStuff", "+", Arrays.asList(
              new FieldAccessPostAggregator("idx", "idx"),
              new ConstantPostAggregator("const", 1))),
          new ArithmeticPostAggregator("subtractStuff", "-", Arrays.asList(
              new FieldAccessPostAggregator("rev", "rev2"),
              new ConstantPostAggregator("const", 1))))),
      new ArithmeticPostAggregator("addStuff", "+", Arrays.asList(
          new FieldAccessPostAggregator("divideStuff", "divideStuff"),
          new FieldAccessPostAggregator("count", "count"))));
  boolean exceptionOccurred = false;
  try {
    Queries.prepareAggregations(ImmutableList.of(), aggFactories, postAggs);
  } catch (IllegalArgumentException e) {
    exceptionOccurred = true;
  }
  Assert.assertTrue(exceptionOccurred);
}
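The failure comes from FieldAccessPostAggregator("rev", "rev2"): the aggregators produce "count", "idx", and "rev", but nothing named "rev2". Assuming the first argument of prepareAggregations seeds the set of known output names (it is passed empty in these tests), declaring "rev2" there would let the same graph validate; so would simply referencing "rev" instead:

  // Sketch, under the assumption stated above: declaring "rev2" as an
  // externally provided output name makes the same graph validate.
  Queries.prepareAggregations(ImmutableList.of("rev2"), aggFactories, postAggs);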
Use of org.apache.druid.query.aggregation.AggregatorFactory in project druid by druid-io.
The class FloatFirstAggregationTest, method testSerde.
@Test
public void testSerde() throws Exception {
  DefaultObjectMapper mapper = new DefaultObjectMapper();
  String floatSpecJson = "{\"type\":\"floatFirst\",\"name\":\"billy\",\"fieldName\":\"nilly\"}";
  AggregatorFactory deserialized = mapper.readValue(floatSpecJson, AggregatorFactory.class);
  Assert.assertEquals(floatFirstAggregatorFactory, deserialized);
  Assert.assertArrayEquals(floatFirstAggregatorFactory.getCacheKey(), deserialized.getCacheKey());
}
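A natural extension, sketched below using only the mapper and objects already in scope, is checking the round trip in the other direction: re-serializing the deserialized factory and reading it back should produce an equal instance.

  // Round-trip sketch: serialize the deserialized factory and read it back.
  String reserialized = mapper.writeValueAsString(deserialized);
  AggregatorFactory roundTripped = mapper.readValue(reserialized, AggregatorFactory.class);
  Assert.assertEquals(deserialized, roundTripped);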
Use of org.apache.druid.query.aggregation.AggregatorFactory in project druid by druid-io.
The class TimeseriesQueryEngine, method processVectorized.
private Sequence<Result<TimeseriesResultValue>> processVectorized(
    final TimeseriesQuery query,
    final StorageAdapter adapter,
    @Nullable final Filter filter,
    final Interval queryInterval,
    final Granularity gran,
    final boolean descending
) {
  final boolean skipEmptyBuckets = query.isSkipEmptyBuckets();
  final List<AggregatorFactory> aggregatorSpecs = query.getAggregatorSpecs();
  final VectorCursor cursor = adapter.makeVectorCursor(
      filter,
      queryInterval,
      query.getVirtualColumns(),
      descending,
      QueryContexts.getVectorSize(query),
      null
  );
  if (cursor == null) {
    return Sequences.empty();
  }
  final Closer closer = Closer.create();
  closer.register(cursor);
  try {
    final VectorCursorGranularizer granularizer =
        VectorCursorGranularizer.create(adapter, cursor, gran, queryInterval);
    if (granularizer == null) {
      return Sequences.empty();
    }
    final VectorColumnSelectorFactory columnSelectorFactory = cursor.getColumnSelectorFactory();
    final AggregatorAdapters aggregators =
        closer.register(AggregatorAdapters.factorizeVector(columnSelectorFactory, query.getAggregatorSpecs()));
    final ResourceHolder<ByteBuffer> bufferHolder = closer.register(bufferPool.take());
    final ByteBuffer buffer = bufferHolder.get();
    if (aggregators.spaceNeeded() > buffer.remaining()) {
      throw new ISE(
          "Not enough space for aggregators, needed [%,d] bytes but have only [%,d].",
          aggregators.spaceNeeded(),
          buffer.remaining()
      );
    }
    return Sequences.withBaggage(
        Sequences.simple(granularizer.getBucketIterable()).map(bucketInterval -> {
          // Whether the current bucket is still empty.
          boolean emptyBucket = true;
          while (!cursor.isDone()) {
            granularizer.setCurrentOffsets(bucketInterval);
            if (granularizer.getEndOffset() > granularizer.getStartOffset()) {
              if (emptyBucket) {
                // Initialize aggregation state lazily, on the first non-empty
                // vector for this bucket.
                aggregators.init(buffer, 0);
              }
              aggregators.aggregateVector(buffer, 0, granularizer.getStartOffset(), granularizer.getEndOffset());
              emptyBucket = false;
            }
            if (!granularizer.advanceCursorWithinBucket()) {
              break;
            }
          }
          if (emptyBucket && skipEmptyBuckets) {
            // Return null; filtered out below by the Objects::nonNull filter.
            return null;
          }
          final TimeseriesResultBuilder bob =
              new TimeseriesResultBuilder(gran.toDateTime(bucketInterval.getStartMillis()));
          if (emptyBucket) {
            // Nothing was aggregated: initialize so each aggregator reports its default value.
            aggregators.init(buffer, 0);
          }
          for (int i = 0; i < aggregatorSpecs.size(); i++) {
            bob.addMetric(aggregatorSpecs.get(i).getName(), aggregators.get(buffer, 0, i));
          }
          return bob.build();
        }).filter(Objects::nonNull),
        closer
    );
  } catch (Throwable t1) {
    try {
      closer.close();
    } catch (Throwable t2) {
      t1.addSuppressed(t2);
    }
    throw t1;
  }
}
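The skipEmptyBuckets flag that drives the null-bucket handling above comes from the query itself. Below is a sketch of a caller-side query that enables it via the query context; the dataSource, interval, and granularity are placeholder values:

  // Sketch: a timeseries query with skipEmptyBuckets enabled, so empty
  // granularity buckets yield no result rows (placeholder dataSource/interval).
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("wikipedia")
      .intervals("2020-01-01/2020-01-02")
      .granularity(Granularities.HOUR)
      .aggregators(Collections.singletonList(new CountAggregatorFactory("count")))
      .context(ImmutableMap.of("skipEmptyBuckets", true))
      .build();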
Use of org.apache.druid.query.aggregation.AggregatorFactory in project druid by druid-io.
The class TimeseriesQueryEngine, method processNonVectorized.
private Sequence<Result<TimeseriesResultValue>> processNonVectorized(
    final TimeseriesQuery query,
    final StorageAdapter adapter,
    @Nullable final Filter filter,
    final Interval queryInterval,
    final Granularity gran,
    final boolean descending
) {
  final boolean skipEmptyBuckets = query.isSkipEmptyBuckets();
  final List<AggregatorFactory> aggregatorSpecs = query.getAggregatorSpecs();
  return QueryRunnerHelper.makeCursorBasedQuery(
      adapter,
      Collections.singletonList(queryInterval),
      filter,
      query.getVirtualColumns(),
      descending,
      gran,
      cursor -> {
        if (skipEmptyBuckets && cursor.isDone()) {
          return null;
        }
        // Create one row-at-a-time aggregator per spec for this cursor.
        Aggregator[] aggregators = new Aggregator[aggregatorSpecs.size()];
        String[] aggregatorNames = new String[aggregatorSpecs.size()];
        for (int i = 0; i < aggregatorSpecs.size(); i++) {
          aggregators[i] = aggregatorSpecs.get(i).factorize(cursor.getColumnSelectorFactory());
          aggregatorNames[i] = aggregatorSpecs.get(i).getName();
        }
        try {
          while (!cursor.isDone()) {
            for (Aggregator aggregator : aggregators) {
              aggregator.aggregate();
            }
            cursor.advance();
          }
          TimeseriesResultBuilder bob = new TimeseriesResultBuilder(cursor.getTime());
          for (int i = 0; i < aggregatorSpecs.size(); i++) {
            bob.addMetric(aggregatorNames[i], aggregators[i].get());
          }
          return bob.build();
        } finally {
          // Release any resources held by the aggregators.
          for (Aggregator agg : aggregators) {
            agg.close();
          }
        }
      }
  );
}
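The per-cursor lifecycle used in both engines (factorize an Aggregator from the cursor's column selector factory, aggregate row by row, read the value, close) can be isolated into a small helper. The sketch below is hypothetical, not a Druid API:

  // Hypothetical helper (not part of Druid): aggregate a single cursor with one
  // factory, following the same factorize/aggregate/get/close lifecycle as above.
  static Object aggregateCursor(AggregatorFactory factory, Cursor cursor) {
    Aggregator aggregator = factory.factorize(cursor.getColumnSelectorFactory());
    try {
      while (!cursor.isDone()) {
        aggregator.aggregate();   // accumulate the current row
        cursor.advance();
      }
      return aggregator.get();    // read the accumulated value
    } finally {
      aggregator.close();         // release any per-aggregator resources
    }
  }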