Example 11 with MapJoinableFactory

Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.

The class MovingAverageQueryTest, method testQuery.

/**
 * Validate that the specified query behaves correctly.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testQuery() throws IOException {
    Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
    Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
    List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
    Assert.assertNotNull(expectedResults);
    Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
    CachingClusteredClient baseClient = new CachingClusteredClient(warehouse, new TimelineServerView() {

        @Override
        public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis) {
            return Optional.empty();
        }

        @Override
        public List<ImmutableDruidServer> getDruidServers() {
            return null;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
            return null;
        }

        @Override
        public void registerTimelineCallback(Executor exec, TimelineCallback callback) {
        }

        @Override
        public void registerSegmentCallback(Executor exec, SegmentCallback callback) {
        }

        @Override
        public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback) {
        }
    },
        MapCache.create(100000),
        jsonMapper,
        new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1),
        new CacheConfig(),
        new DruidHttpClientConfig() {

            @Override
            public long getMaxQueuedBytes() {
                return 0L;
            }
        },
        new DruidProcessingConfig() {

            @Override
            public String getFormatString() {
                return null;
            }
        },
        ForkJoinPool.commonPool(),
        QueryStackTests.DEFAULT_NOOP_SCHEDULER,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        new NoopServiceEmitter());
    ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(
        new ServiceEmitter("", "", null) {
            @Override
            public void emit(Event event) {
            }
        },
        baseClient,
        null, /* local client; unused in this test, so pass in null */
        warehouse, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        retryConfig, jsonMapper, serverConfig, null, new CacheConfig());
    defineMocks();
    QueryPlus queryPlus = QueryPlus.wrap(query);
    final Sequence<?> res = query.getRunner(walker).run(queryPlus);
    List actualResults = new ArrayList();
    actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
    expectedResults = consistentTypeCasting(expectedResults);
    actualResults = consistentTypeCasting(actualResults);
    Assert.assertEquals(expectedResults, actualResults);
}
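The joinable factory passed in above is deliberately empty: new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()) registers no factories and no data-source mappings. A minimal sketch of what that means in practice, assuming the JoinableFactory contract (isDirectlyJoinable/build) that MapJoinableFactory implements; the class and main method here are illustrative only:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.segment.join.JoinableFactory;
import org.apache.druid.segment.join.MapJoinableFactory;

public class NoopJoinableFactoryExample {
    public static void main(String[] args) {
        // No per-class factories, no DataSource mappings: the factory knows nothing.
        JoinableFactory factory = new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of());
        // With nothing registered, no data source is directly joinable, so the
        // factory serves as a no-op stand-in for tests that never execute a join.
        System.out.println(factory.isDirectlyJoinable(new TableDataSource("foo"))); // false
    }
}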

Example 12 with MapJoinableFactory

Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.

The class CachingClusteredClientBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    final String schemaName = "basic";
    parallelCombine = parallelism > 0;
    GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    Map<DataSegment, QueryableIndex> queryableIndexes = Maps.newHashMapWithExpectedSize(numServers);
    for (int i = 0; i < numServers; i++) {
        final DataSegment dataSegment = DataSegment.builder()
            .dataSource(DATA_SOURCE)
            .interval(schemaInfo.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(i))
            .size(0)
            .build();
        final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
        LOG.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
        final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
        queryableIndexes.put(dataSegment, index);
    }
    final DruidProcessingConfig processingConfig = new DruidProcessingConfig() {

        @Override
        public String getFormatString() {
            return null;
        }

        @Override
        public int intermediateComputeSizeBytes() {
            return PROCESSING_BUFFER_SIZE;
        }

        @Override
        public int getNumMergeBuffers() {
            return 1;
        }

        @Override
        public int getNumThreads() {
            return numProcessingThreads;
        }

        @Override
        public boolean useParallelMergePool() {
            return true;
        }
    };
    conglomerate = new DefaultQueryRunnerFactoryConglomerate(
        ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
            .put(TimeseriesQuery.class, new TimeseriesQueryRunnerFactory(
                new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER))
            .put(TopNQuery.class, new TopNQueryRunnerFactory(
                new StupidPool<>("TopNQueryRunnerFactory-bufferPool", () -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)),
                new TopNQueryQueryToolChest(new TopNQueryConfig()), QueryRunnerTestHelper.NOOP_QUERYWATCHER))
            .put(GroupByQuery.class, makeGroupByQueryRunnerFactory(GroupByQueryRunnerTest.DEFAULT_MAPPER, new GroupByQueryConfig() {
                @Override
                public String getDefaultStrategy() {
                    return GroupByStrategySelector.STRATEGY_V2;
                }
            }, processingConfig))
            .build());
    toolChestWarehouse = new QueryToolChestWarehouse() {

        @Override
        public <T, QueryType extends Query<T>> QueryToolChest<T, QueryType> getToolChest(final QueryType query) {
            return conglomerate.findFactory(query).getToolchest();
        }
    };
    SimpleServerView serverView = new SimpleServerView();
    int serverSuffix = 1;
    for (Entry<DataSegment, QueryableIndex> entry : queryableIndexes.entrySet()) {
        serverView.addServer(createServer(serverSuffix++), entry.getKey(), entry.getValue());
    }
    processingPool = Execs.multiThreaded(processingConfig.getNumThreads(), "caching-clustered-client-benchmark");
    forkJoinPool = new ForkJoinPool(
        (int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75),
        ForkJoinPool.defaultForkJoinWorkerThreadFactory,
        null,
        true);
    cachingClusteredClient = new CachingClusteredClient(
        toolChestWarehouse,
        serverView,
        MapCache.create(0),
        JSON_MAPPER,
        new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), 0),
        new CacheConfig(),
        new DruidHttpClientConfig(),
        processingConfig,
        forkJoinPool,
        QueryStackTests.DEFAULT_NOOP_SCHEDULER,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        new NoopServiceEmitter());
}
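The ForkJoinPool above is sized to 75% of the available cores, rounded up, presumably to leave headroom for the separate processing pool. A standalone illustration of that sizing, using only JDK APIs (the class name is illustrative):

import java.util.concurrent.ForkJoinPool;

public class PoolSizingExample {
    public static void main(String[] args) {
        // ceil(cores * 0.75): e.g. 8 cores -> 6 worker threads, 4 cores -> 3.
        int parallelism = (int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75);
        // asyncMode = true, matching the benchmark setup above.
        ForkJoinPool pool = new ForkJoinPool(
            parallelism, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true);
        System.out.println("parallelism = " + pool.getParallelism());
        pool.shutdown();
    }
}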

Example 13 with MapJoinableFactory

Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.

The class QueryStackTests, method makeJoinableFactoryFromDefault.

public static JoinableFactory makeJoinableFactoryFromDefault(
    @Nullable LookupExtractorFactoryContainerProvider lookupProvider,
    @Nullable Set<JoinableFactory> customFactories,
    @Nullable Map<Class<? extends JoinableFactory>, Class<? extends DataSource>> customMappings
) {
    ImmutableSet.Builder<JoinableFactory> setBuilder = ImmutableSet.builder();
    ImmutableMap.Builder<Class<? extends JoinableFactory>, Class<? extends DataSource>> mapBuilder = ImmutableMap.builder();
    setBuilder.add(new InlineJoinableFactory());
    mapBuilder.put(InlineJoinableFactory.class, InlineDataSource.class);
    if (lookupProvider != null) {
        setBuilder.add(new LookupJoinableFactory(lookupProvider));
        mapBuilder.put(LookupJoinableFactory.class, LookupDataSource.class);
    }
    if (customFactories != null) {
        setBuilder.addAll(customFactories);
    }
    if (customMappings != null) {
        mapBuilder.putAll(customMappings);
    }
    return new MapJoinableFactory(setBuilder.build(), mapBuilder.build());
}
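A hypothetical call to the helper above, using only the API it defines: with all three arguments null, the returned MapJoinableFactory handles inline data sources alone; supplying a lookup provider adds lookup joins on top.

// Inline joins only; every optional argument is null.
JoinableFactory inlineOnly = QueryStackTests.makeJoinableFactoryFromDefault(null, null, null);
// With a lookup provider (any LookupExtractorFactoryContainerProvider available
// in the test), lookup data sources become joinable as well:
// JoinableFactory withLookups = QueryStackTests.makeJoinableFactoryFromDefault(lookupProvider, null, null);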

Example 14 with MapJoinableFactory

Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.

The class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline.

/**
 * This tests the contention between 3 components, DruidSchema, InventoryView, and BrokerServerView.
 * It first triggers refreshing DruidSchema. To mimic some heavy work done with {@link DruidSchema#lock},
 * {@link DruidSchema#buildDruidTable} is overridden to sleep before doing real work. While refreshing DruidSchema,
 * more new segments are added to InventoryView, which triggers updates of BrokerServerView. Finally, while
 * BrokerServerView is updated, {@link BrokerServerView#getTimeline} is continuously called to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
        serverView,
        segmentManager,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        PLANNER_CONFIG_DEFAULT,
        new NoopEscalator(),
        new BrokerInternalQueryConfig(),
        null
    ) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done in lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable.
    Future refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    // Trigger updates of BrokerServerView. This should be done asynchronously.
    // add completely new segments
    addSegmentsToCluster(numExistingSegments, numServers, 50);
    // Add replicas of the first 30 segments.
    addReplicasToCluster(1, numServers, 30);
    // Remove 50 segments: the first 30 still have replicas on other servers,
    // while the other 20 are removed from the cluster entirely.
    removeSegmentsFromCluster(numServers, 50);
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        boolean hasTimeline = exec
            .submit(() -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent())
            .get(100, TimeUnit.MILLISECONDS);
        Assert.assertTrue(hasTimeline);
        // We want to call getTimeline while BrokerServerView is being updated. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
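The doInLock helper used in the buildDruidTable override is not part of this excerpt. A plausible minimal sketch, assuming it simply runs an action while holding the same monitor that DruidSchema#refresh synchronizes on (the field name 'lock' is an assumption):

// Hypothetical sketch of the doInLock helper referenced above; the real
// implementation is not shown here. Sleeping inside this block mimics a slow
// table build holding the schema lock while refresh is in progress.
void doInLock(Runnable action) {
    synchronized (lock) { // 'lock' assumed to be DruidSchema's internal lock object
        action.run();
    }
}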

Example 15 with MapJoinableFactory

Use of org.apache.druid.segment.join.MapJoinableFactory in project druid by druid-io.

The class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata.

/**
 * This tests the contention between 2 methods of DruidSchema, {@link DruidSchema#refresh} and
 * {@link DruidSchema#getSegmentMetadataSnapshot()}. It first triggers refreshing DruidSchema.
 * To mimic some heavy work done with {@link DruidSchema#lock}, {@link DruidSchema#buildDruidTable} is overridden
 * to sleep before doing real work. While refreshing DruidSchema, getSegmentMetadataSnapshot() is continuously
 * called to mimic reading the segments table of SystemSchema. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
        serverView,
        segmentManager,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        PLANNER_CONFIG_DEFAULT,
        new NoopEscalator(),
        new BrokerInternalQueryConfig(),
        null
    ) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done in lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable.
    Future refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = exec.submit(() -> schema.getSegmentMetadataSnapshot()).get(100, TimeUnit.MILLISECONDS);
        Assert.assertFalse(segmentsMetadata.isEmpty());
        // We want to call getSegmentMetadataSnapshot while refreshing. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
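The assertion pattern in the loop above is the heart of the test: each snapshot read is submitted to another thread and must complete within 100 ms even while the refresh thread sleeps inside the lock. One iteration in isolation, using only names from the test above:

// The non-blocking-read check from the loop. If getSegmentMetadataSnapshot()
// contended on the lock held by the sleeping refresh, this get(...) would time
// out and fail the test.
Future<Map<SegmentId, AvailableSegmentMetadata>> readFuture =
    exec.submit(() -> schema.getSegmentMetadataSnapshot());
Assert.assertFalse(readFuture.get(100, TimeUnit.MILLISECONDS).isEmpty());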

Aggregations

MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory): 20 usages
BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig): 15 usages
NoopEscalator (org.apache.druid.server.security.NoopEscalator): 15 usages
Test (org.junit.Test): 15 usages
DataSegment (org.apache.druid.timeline.DataSegment): 14 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 11 usages
SegmentId (org.apache.druid.timeline.SegmentId): 11 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 10 usages
GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource): 10 usages
TableDataSource (org.apache.druid.query.TableDataSource): 10 usages
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 10 usages
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 10 usages
SpecificSegmentsQuerySegmentWalker (org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker): 10 usages
TestServerInventoryView (org.apache.druid.sql.calcite.util.TestServerInventoryView): 10 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 9 usages
File (java.io.File): 9 usages
IOException (java.io.IOException): 9 usages
List (java.util.List): 9 usages
DoubleSumAggregatorFactory (org.apache.druid.query.aggregation.DoubleSumAggregatorFactory): 9 usages
HyperUniquesAggregatorFactory (org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory): 9 usages