Search in sources:

Example 6 with AggregatorAdapters

Use of org.apache.druid.query.aggregation.AggregatorAdapters in the project druid by druid-io.

From the class HashVectorGrouperTest, method testGrowTwice.

@Test
public void testGrowTwice() {
    // Sizing for this scenario: small starting table, room to grow twice.
    final int vectorSize = 512;
    final int keyBytes = 4;
    final int aggBytes = 8;
    final WritableMemory keys = WritableMemory.allocate(keyBytes * vectorSize);
    final AggregatorAdapters adapters = Mockito.mock(AggregatorAdapters.class);
    Mockito.when(adapters.spaceNeeded()).thenReturn(aggBytes);
    final int initialBuckets = 4;
    final int bucketCap = 32;
    final ByteBuffer buffer = ByteBuffer.wrap(new byte[(keyBytes + aggBytes) * bucketCap]);
    final HashVectorGrouper grouper = new HashVectorGrouper(
        Suppliers.ofInstance(buffer),
        keyBytes,
        adapters,
        bucketCap,
        0.f,
        initialBuckets
    );
    grouper.initVectorized(vectorSize);
    final int initialTableStart = grouper.getTableStart();

    // Two distinct keys fit in the starting table, so no growth is expected.
    fillKeyspace(keys, vectorSize, 2);
    AggregateResult result = grouper.aggregateVector(keys, 0, vectorSize);
    Assert.assertTrue(result.isOk());
    Assert.assertEquals(initialTableStart, grouper.getTableStart());

    // A third key triggers the first growth: the table moves, but is not yet full-size.
    fillKeyspace(keys, vectorSize, 3);
    result = grouper.aggregateVector(keys, 0, vectorSize);
    Assert.assertTrue(result.isOk());
    Assert.assertTrue(grouper.getTableStart() > initialTableStart);

    // Six keys push the table to its maximum size; table start drops to 0.
    fillKeyspace(keys, vectorSize, 6);
    result = grouper.aggregateVector(keys, 0, vectorSize);
    Assert.assertTrue(result.isOk());
    Assert.assertEquals(0, grouper.getTableStart());
}
Also used : WritableMemory(org.apache.datasketches.memory.WritableMemory) AggregatorAdapters(org.apache.druid.query.aggregation.AggregatorAdapters) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)

Example 7 with AggregatorAdapters

Use of org.apache.druid.query.aggregation.AggregatorAdapters in the project druid by druid-io.

From the class TimeseriesQueryEngine, method processVectorized.

/**
 * Runs a timeseries query using the vectorized engine.
 *
 * <p>Builds a vector cursor over {@code queryInterval}, then lazily aggregates one
 * granularity bucket at a time, emitting one result row per bucket (empty buckets
 * are dropped when {@code skipEmptyBuckets} is set on the query). The cursor, the
 * vectorized aggregators, and the pooled aggregation buffer are all registered with
 * a {@link Closer} that is attached as baggage to the returned sequence, so they are
 * released when the sequence is fully consumed or closed.
 *
 * @param query         the timeseries query being executed
 * @param adapter       storage adapter for the segment to scan
 * @param filter        optional row filter; may be null
 * @param queryInterval interval to scan
 * @param gran          granularity used to bucket result rows
 * @param descending    whether the cursor iterates in descending time order
 * @return a lazy sequence of per-bucket results; an empty sequence if no cursor or
 *         granularizer can be created
 * @throws ISE if the pooled buffer is too small for the aggregators' required space
 */
private Sequence<Result<TimeseriesResultValue>> processVectorized(final TimeseriesQuery query, final StorageAdapter adapter, @Nullable final Filter filter, final Interval queryInterval, final Granularity gran, final boolean descending) {
    final boolean skipEmptyBuckets = query.isSkipEmptyBuckets();
    final List<AggregatorFactory> aggregatorSpecs = query.getAggregatorSpecs();
    final VectorCursor cursor = adapter.makeVectorCursor(filter, queryInterval, query.getVirtualColumns(), descending, QueryContexts.getVectorSize(query), null);
    if (cursor == null) {
        // The adapter produced no cursor for this interval/filter combination: nothing to scan.
        return Sequences.empty();
    }
    final Closer closer = Closer.create();
    closer.register(cursor);
    try {
        final VectorCursorGranularizer granularizer = VectorCursorGranularizer.create(adapter, cursor, gran, queryInterval);
        if (granularizer == null) {
            // NOTE(review): the cursor registered above is released only when the returned
            // sequence's baggage runs; here the empty sequence carries no baggage — presumably
            // intentional upstream, but worth confirming the cursor is not leaked on this path.
            return Sequences.empty();
        }
        final VectorColumnSelectorFactory columnSelectorFactory = cursor.getColumnSelectorFactory();
        final AggregatorAdapters aggregators = closer.register(AggregatorAdapters.factorizeVector(columnSelectorFactory, query.getAggregatorSpecs()));
        // Borrow a pooled buffer to hold aggregation state; returned to the pool via the closer.
        final ResourceHolder<ByteBuffer> bufferHolder = closer.register(bufferPool.take());
        final ByteBuffer buffer = bufferHolder.get();
        if (aggregators.spaceNeeded() > buffer.remaining()) {
            throw new ISE("Not enough space for aggregators, needed [%,d] bytes but have only [%,d].", aggregators.spaceNeeded(), buffer.remaining());
        }
        // Map each granularity bucket to one result row; evaluation is lazy (per consumed element).
        return Sequences.withBaggage(Sequences.simple(granularizer.getBucketIterable()).map(bucketInterval -> {
            // Whether or not the current bucket is empty
            boolean emptyBucket = true;
            while (!cursor.isDone()) {
                granularizer.setCurrentOffsets(bucketInterval);
                if (granularizer.getEndOffset() > granularizer.getStartOffset()) {
                    if (emptyBucket) {
                        // Initialize aggregator state lazily, on the first non-empty batch of the bucket.
                        aggregators.init(buffer, 0);
                    }
                    aggregators.aggregateVector(buffer, 0, granularizer.getStartOffset(), granularizer.getEndOffset());
                    emptyBucket = false;
                }
                if (!granularizer.advanceCursorWithinBucket()) {
                    // No further vectors within this bucket; emit the bucket's result below.
                    break;
                }
            }
            if (emptyBucket && skipEmptyBuckets) {
                // Return null, will get filtered out later by the Objects::nonNull filter.
                return null;
            }
            final TimeseriesResultBuilder bob = new TimeseriesResultBuilder(gran.toDateTime(bucketInterval.getStartMillis()));
            if (emptyBucket) {
                // Bucket had no rows but must still be emitted: initialize to default aggregator values.
                aggregators.init(buffer, 0);
            }
            for (int i = 0; i < aggregatorSpecs.size(); i++) {
                bob.addMetric(aggregatorSpecs.get(i).getName(), aggregators.get(buffer, 0, i));
            }
            return bob.build();
        }).filter(Objects::nonNull), closer);
    } catch (Throwable t1) {
        // Setup failed before the sequence took ownership of the closer: release the
        // resources here, keeping any secondary close failure as a suppressed exception.
        try {
            closer.close();
        } catch (Throwable t2) {
            t1.addSuppressed(t2);
        }
        throw t1;
    }
}
Also used : Closer(org.apache.druid.java.util.common.io.Closer) AggregatorAdapters(org.apache.druid.query.aggregation.AggregatorAdapters) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) VectorCursor(org.apache.druid.segment.vector.VectorCursor) ByteBuffer(java.nio.ByteBuffer) VectorColumnSelectorFactory(org.apache.druid.segment.vector.VectorColumnSelectorFactory) VectorCursorGranularizer(org.apache.druid.query.vector.VectorCursorGranularizer) ISE(org.apache.druid.java.util.common.ISE)

Example 8 with AggregatorAdapters

Use of org.apache.druid.query.aggregation.AggregatorAdapters in the project druid by druid-io.

From the class HashVectorGrouperTest, method testCloseAggregatorAdaptorsShouldBeClosed.

@Test
public void testCloseAggregatorAdaptorsShouldBeClosed() {
    final ByteBuffer backing = ByteBuffer.wrap(new byte[4096]);
    final AggregatorAdapters adapters = Mockito.mock(AggregatorAdapters.class);
    // Closing the grouper must propagate to its aggregator adapters exactly once.
    final HashVectorGrouper grouper = new HashVectorGrouper(
        Suppliers.ofInstance(backing),
        1024,
        adapters,
        Integer.MAX_VALUE,
        0.f,
        0
    );
    grouper.initVectorized(512);
    grouper.close();
    Mockito.verify(adapters, Mockito.times(1)).close();
}
Also used : AggregatorAdapters(org.apache.druid.query.aggregation.AggregatorAdapters) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)

Aggregations

ByteBuffer (java.nio.ByteBuffer)8 AggregatorAdapters (org.apache.druid.query.aggregation.AggregatorAdapters)8 Test (org.junit.Test)7 WritableMemory (org.apache.datasketches.memory.WritableMemory)5 ISE (org.apache.druid.java.util.common.ISE)1 Closer (org.apache.druid.java.util.common.io.Closer)1 AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory)1 VectorCursorGranularizer (org.apache.druid.query.vector.VectorCursorGranularizer)1 VectorColumnSelectorFactory (org.apache.druid.segment.vector.VectorColumnSelectorFactory)1 VectorCursor (org.apache.druid.segment.vector.VectorCursor)1