
Example 1 with GeneratorSchemaInfo

Use of org.apache.druid.segment.generator.GeneratorSchemaInfo in project druid by druid-io.

From the class SqlVectorizedExpressionSanityTest, method setupClass:

@BeforeClass
public static void setupClass() {
    Calcites.setSystemProperties();
    ExpressionProcessing.initializeForStrictBooleansTests(true);
    CLOSER = Closer.create();
    final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("expression-testbench");
    final DataSegment dataSegment = DataSegment.builder()
            .dataSource("foo")
            .interval(schemaInfo.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build();
    final SegmentGenerator segmentGenerator = CLOSER.register(new SegmentGenerator());
    INDEX = CLOSER.register(segmentGenerator.generate(dataSegment, schemaInfo, Granularities.HOUR, ROWS_PER_SEGMENT));
    CONGLOMERATE = QueryStackTests.createQueryRunnerFactoryConglomerate(CLOSER);
    WALKER = new SpecificSegmentsQuerySegmentWalker(CONGLOMERATE).add(dataSegment, INDEX);
    CLOSER.register(WALKER);
    final PlannerConfig plannerConfig = new PlannerConfig();
    final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(
            CONGLOMERATE,
            WALKER,
            plannerConfig,
            AuthTestUtils.TEST_AUTHORIZER_MAPPER
    );
    PLANNER_FACTORY = new PlannerFactory(
            rootSchema,
            CalciteTests.createMockQueryMakerFactory(WALKER, CONGLOMERATE),
            CalciteTests.createOperatorTable(),
            CalciteTests.createExprMacroTable(),
            plannerConfig,
            AuthTestUtils.TEST_AUTHORIZER_MAPPER,
            CalciteTests.getJsonMapper(),
            CalciteTests.DRUID_SCHEMA_NAME
    );
}
Also used: SegmentGenerator(org.apache.druid.segment.generator.SegmentGenerator), SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker), LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec), GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo), PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig), DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog), PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory), DataSegment(org.apache.druid.timeline.DataSegment), BeforeClass(org.junit.BeforeClass)
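
Every resource above is registered with CLOSER, so one close() call releases the generated segment, index, and walker. A minimal teardown sketch, assuming the usual JUnit pairing (the method name is illustrative):

@AfterClass
public static void teardownClass() throws IOException {
    // Releases the SegmentGenerator, QueryableIndex, and walker registered in setupClass.
    CLOSER.close();
}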

Example 2 with GeneratorSchemaInfo

Use of org.apache.druid.segment.generator.GeneratorSchemaInfo in project druid by druid-io.

From the class ExpressionVectorSelectorsTest, method setupClass:

@BeforeClass
public static void setupClass() {
    CLOSER = Closer.create();
    final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("expression-testbench");
    final DataSegment dataSegment = DataSegment.builder()
            .dataSource("foo")
            .interval(schemaInfo.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build();
    final SegmentGenerator segmentGenerator = CLOSER.register(new SegmentGenerator());
    INDEX = CLOSER.register(segmentGenerator.generate(dataSegment, schemaInfo, Granularities.HOUR, ROWS_PER_SEGMENT));
}
Also used: SegmentGenerator(org.apache.druid.segment.generator.SegmentGenerator), LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec), GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo), DataSegment(org.apache.druid.timeline.DataSegment), BeforeClass(org.junit.BeforeClass)
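
The "expression-testbench" entry is one of several predefined schemas in GeneratorBasicSchemas.SCHEMA_MAP. A hedged sketch for inspecting a schema before generating data; getDataInterval() appears in the code above, while getColumnSchemas() and getName() are assumed accessors on the generator API:

GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("expression-testbench");
// The interval bounds the timestamps the generator will emit.
System.out.println("data interval: " + schemaInfo.getDataInterval());
// Assumed accessor: one column schema per synthetic column.
schemaInfo.getColumnSchemas().forEach(col -> System.out.println(col.getName()));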

Example 3 with GeneratorSchemaInfo

Use of org.apache.druid.segment.generator.GeneratorSchemaInfo in project druid by druid-io.

From the class CachingClusteredClientBenchmark, method setup:

@Setup(Level.Trial)
public void setup() {
    final String schemaName = "basic";
    parallelCombine = parallelism > 0;
    GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    Map<DataSegment, QueryableIndex> queryableIndexes = Maps.newHashMapWithExpectedSize(numServers);
    for (int i = 0; i < numServers; i++) {
        final DataSegment dataSegment = DataSegment.builder()
                .dataSource(DATA_SOURCE)
                .interval(schemaInfo.getDataInterval())
                .version("1")
                .shardSpec(new LinearShardSpec(i))
                .size(0)
                .build();
        final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
        LOG.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
        final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
        queryableIndexes.put(dataSegment, index);
    }
    final DruidProcessingConfig processingConfig = new DruidProcessingConfig() {

        @Override
        public String getFormatString() {
            return null;
        }

        @Override
        public int intermediateComputeSizeBytes() {
            return PROCESSING_BUFFER_SIZE;
        }

        @Override
        public int getNumMergeBuffers() {
            return 1;
        }

        @Override
        public int getNumThreads() {
            return numProcessingThreads;
        }

        @Override
        public boolean useParallelMergePool() {
            return true;
        }
    };
    conglomerate = new DefaultQueryRunnerFactoryConglomerate(
            ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
                    .put(
                            TimeseriesQuery.class,
                            new TimeseriesQueryRunnerFactory(
                                    new TimeseriesQueryQueryToolChest(),
                                    new TimeseriesQueryEngine(),
                                    QueryRunnerTestHelper.NOOP_QUERYWATCHER
                            )
                    )
                    .put(
                            TopNQuery.class,
                            new TopNQueryRunnerFactory(
                                    new StupidPool<>(
                                            "TopNQueryRunnerFactory-bufferPool",
                                            () -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)
                                    ),
                                    new TopNQueryQueryToolChest(new TopNQueryConfig()),
                                    QueryRunnerTestHelper.NOOP_QUERYWATCHER
                            )
                    )
                    .put(
                            GroupByQuery.class,
                            makeGroupByQueryRunnerFactory(
                                    GroupByQueryRunnerTest.DEFAULT_MAPPER,
                                    new GroupByQueryConfig() {
                                        @Override
                                        public String getDefaultStrategy() {
                                            return GroupByStrategySelector.STRATEGY_V2;
                                        }
                                    },
                                    processingConfig
                            )
                    )
                    .build()
    );
    toolChestWarehouse = new QueryToolChestWarehouse() {

        @Override
        public <T, QueryType extends Query<T>> QueryToolChest<T, QueryType> getToolChest(final QueryType query) {
            return conglomerate.findFactory(query).getToolchest();
        }
    };
    SimpleServerView serverView = new SimpleServerView();
    int serverSuffix = 1;
    for (Entry<DataSegment, QueryableIndex> entry : queryableIndexes.entrySet()) {
        serverView.addServer(createServer(serverSuffix++), entry.getKey(), entry.getValue());
    }
    }
    processingPool = Execs.multiThreaded(processingConfig.getNumThreads(), "caching-clustered-client-benchmark");
    forkJoinPool = new ForkJoinPool(
            (int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75),
            ForkJoinPool.defaultForkJoinWorkerThreadFactory,
            null,
            true
    );
    cachingClusteredClient = new CachingClusteredClient(
            toolChestWarehouse,
            serverView,
            MapCache.create(0),
            JSON_MAPPER,
            new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), 0),
            new CacheConfig(),
            new DruidHttpClientConfig(),
            processingConfig,
            forkJoinPool,
            QueryStackTests.DEFAULT_NOOP_SCHEDULER,
            new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
            new NoopServiceEmitter()
    );
}
Also used: TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery), TopNQuery(org.apache.druid.query.topn.TopNQuery), Query(org.apache.druid.query.Query), GroupByQuery(org.apache.druid.query.groupby.GroupByQuery), LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec), TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest), TopNQueryQueryToolChest(org.apache.druid.query.topn.TopNQueryQueryToolChest), GroupByQueryQueryToolChest(org.apache.druid.query.groupby.GroupByQueryQueryToolChest), QueryToolChest(org.apache.druid.query.QueryToolChest), DataSegment(org.apache.druid.timeline.DataSegment), DruidHttpClientConfig(org.apache.druid.guice.http.DruidHttpClientConfig), SegmentGenerator(org.apache.druid.segment.generator.SegmentGenerator), TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine), CachePopulatorStats(org.apache.druid.client.cache.CachePopulatorStats), TopNQueryRunnerFactory(org.apache.druid.query.topn.TopNQueryRunnerFactory), QueryToolChestWarehouse(org.apache.druid.query.QueryToolChestWarehouse), CacheConfig(org.apache.druid.client.cache.CacheConfig), MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory), CachingClusteredClient(org.apache.druid.client.CachingClusteredClient), GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig), GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo), DefaultQueryRunnerFactoryConglomerate(org.apache.druid.query.DefaultQueryRunnerFactoryConglomerate), NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter), TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory), QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory), GroupByQueryRunnerFactory(org.apache.druid.query.groupby.GroupByQueryRunnerFactory), TopNQueryConfig(org.apache.druid.query.topn.TopNQueryConfig), QueryableIndex(org.apache.druid.segment.QueryableIndex), StupidPool(org.apache.druid.collections.StupidPool), DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig), ForegroundCachePopulator(org.apache.druid.client.cache.ForegroundCachePopulator), ForkJoinPool(java.util.concurrent.ForkJoinPool), Setup(org.openjdk.jmh.annotations.Setup)
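
A benchmark that builds executors and registers segment generators with a Closer needs a matching JMH teardown. A plausible sketch, assuming the fields shown in setup(); the method name is illustrative:

@TearDown(Level.Trial)
public void tearDown() throws IOException {
    // Releases every SegmentGenerator registered with the closer in setup().
    closer.close();
    processingPool.shutdownNow();
    forkJoinPool.shutdownNow();
}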

Example 4 with GeneratorSchemaInfo

Use of org.apache.druid.segment.generator.GeneratorSchemaInfo in project druid by druid-io.

From the class TimeseriesBenchmark, method setupQueries:

private void setupQueries() {
    // queries for the basic schema
    Map<String, TimeseriesQuery> basicQueries = new LinkedHashMap<>();
    GeneratorSchemaInfo basicSchema = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
    {
        // basic.A
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
        queryAggs.add(new LongMaxAggregatorFactory("maxLongUniform", "maxLongUniform"));
        queryAggs.add(new DoubleSumAggregatorFactory("sumFloatNormal", "sumFloatNormal"));
        queryAggs.add(new DoubleMinAggregatorFactory("minFloatZipf", "minFloatZipf"));
        queryAggs.add(new HyperUniquesAggregatorFactory("hyperUniquesMet", "hyper"));
        TimeseriesQuery queryA = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(descending)
                .build();
        basicQueries.put("A", queryA);
    }
    {
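        // basic.timeFilterNumeric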
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        BoundDimFilter timeFilter = new BoundDimFilter(ColumnHolder.TIME_COLUMN_NAME, "200000", "300000", false, false, null, null, StringComparators.NUMERIC);
        queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(descending)
                .build();
        basicQueries.put("timeFilterNumeric", timeFilterQuery);
    }
    {
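        // basic.timeFilterAlphanumeric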
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        BoundDimFilter timeFilter = new BoundDimFilter(ColumnHolder.TIME_COLUMN_NAME, "200000", "300000", false, false, null, null, StringComparators.ALPHANUMERIC);
        queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(descending)
                .build();
        basicQueries.put("timeFilterAlphanumeric", timeFilterQuery);
    }
    {
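        // basic.timeFilterByInterval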
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.utc(200000, 300000)));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        queryAggs.add(lsaf);
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(descending)
                .build();
        basicQueries.put("timeFilterByInterval", timeFilterQuery);
    }
    SCHEMA_QUERY_MAP.put("basic", basicQueries);
}
Also used: FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory), BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter), TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery), DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory), GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo), LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory), MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec), DoubleMinAggregatorFactory(org.apache.druid.query.aggregation.DoubleMinAggregatorFactory), LinkedHashMap(java.util.LinkedHashMap), HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory), QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec), List(java.util.List), ArrayList(java.util.ArrayList), LongMaxAggregatorFactory(org.apache.druid.query.aggregation.LongMaxAggregatorFactory)
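
SCHEMA_QUERY_MAP keys queries first by schema, then by name, so a JMH @Param can select one per trial. A hedged sketch of that lookup; the field names and the "schema.query" parameter convention are assumptions, not code from TimeseriesBenchmark:

@Param({"basic.A", "basic.timeFilterNumeric", "basic.timeFilterAlphanumeric", "basic.timeFilterByInterval"})
private String schemaAndQuery;

private TimeseriesQuery query;

@Setup
public void pickQuery() {
    // "basic.A" resolves to SCHEMA_QUERY_MAP.get("basic").get("A")
    String[] parts = schemaAndQuery.split("\\.");
    query = SCHEMA_QUERY_MAP.get(parts[0]).get(parts[1]);
}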

Example 5 with GeneratorSchemaInfo

Use of org.apache.druid.segment.generator.GeneratorSchemaInfo in project druid by druid-io.

From the class TimeCompareBenchmark, method setupQueries:

private void setupQueries() {
    // queries for the basic schema
    GeneratorSchemaInfo basicSchema = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
    QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
    long startMillis = basicSchema.getDataInterval().getStartMillis();
    long endMillis = basicSchema.getDataInterval().getEndMillis();
    long half = (endMillis - startMillis) / 2;
    Interval recent = Intervals.utc(half, endMillis);
    Interval previous = Intervals.utc(startMillis, half);
    log.info("Recent interval: " + recent);
    log.info("Previous interval: " + previous);
    {
        // basic.topNTimeCompare
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(
            new FilteredAggregatorFactory(
                // jsAgg1,
                new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"),
                new IntervalDimFilter(ColumnHolder.TIME_COLUMN_NAME, Collections.singletonList(recent), null)
            )
        );
        queryAggs.add(
            new FilteredAggregatorFactory(
                new LongSumAggregatorFactory("_cmp_sumLongSequential", "sumLongSequential"),
                new IntervalDimFilter(ColumnHolder.TIME_COLUMN_NAME, Collections.singletonList(previous), null)
            )
        );
        TopNQueryBuilder queryBuilderA = new TopNQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .dimension("dimUniform")
                .metric("sumLongSequential")
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .threshold(threshold);
        topNQuery = queryBuilderA.build();
        topNFactory = new TopNQueryRunnerFactory(
            new StupidPool<>(
                "TopNBenchmark-compute-bufferPool",
                new OffheapBufferGenerator("compute", 250000000),
                0,
                Integer.MAX_VALUE
            ),
            new TopNQueryQueryToolChest(new TopNQueryConfig()),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER
        );
    }
    {
        // basic.timeseriesTimeCompare
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(
            new FilteredAggregatorFactory(
                new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"),
                new IntervalDimFilter(ColumnHolder.TIME_COLUMN_NAME, Collections.singletonList(recent), null)
            )
        );
        queryAggs.add(
            new FilteredAggregatorFactory(
                new LongSumAggregatorFactory("_cmp_sumLongSequential", "sumLongSequential"),
                new IntervalDimFilter(ColumnHolder.TIME_COLUMN_NAME, Collections.singletonList(previous), null)
            )
        );
        Druids.TimeseriesQueryBuilder timeseriesQueryBuilder = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false);
        timeseriesQuery = timeseriesQueryBuilder.build();
        timeseriesFactory = new TimeseriesQueryRunnerFactory(
            new TimeseriesQueryQueryToolChest(),
            new TimeseriesQueryEngine(),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER
        );
    }
}
Also used: FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory), TopNQueryBuilder(org.apache.druid.query.topn.TopNQueryBuilder), GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo), LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory), MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec), TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest), TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine), TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory), OffheapBufferGenerator(org.apache.druid.offheap.OffheapBufferGenerator), TopNQueryConfig(org.apache.druid.query.topn.TopNQueryConfig), TopNQueryRunnerFactory(org.apache.druid.query.topn.TopNQueryRunnerFactory), QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec), List(java.util.List), ArrayList(java.util.ArrayList), TopNQueryQueryToolChest(org.apache.druid.query.topn.TopNQueryQueryToolChest), IntervalDimFilter(org.apache.druid.query.filter.IntervalDimFilter), Interval(org.joda.time.Interval)
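
Both filtered sums land in the same result row, so the two periods can be compared directly with a post-aggregator. A hedged sketch using Druid's ArithmeticPostAggregator and FieldAccessPostAggregator (org.apache.druid.query.aggregation.post); wiring it into the builders above via postAggregators(...) is an assumption:

// Ratio of the recent half-interval's sum to the previous half's.
PostAggregator ratio = new ArithmeticPostAggregator(
    "recentToPrevious",
    "quotient",
    Arrays.asList(
        new FieldAccessPostAggregator(null, "sumLongSequential"),
        new FieldAccessPostAggregator(null, "_cmp_sumLongSequential")
    )
);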

Aggregations

GeneratorSchemaInfo (org.apache.druid.segment.generator.GeneratorSchemaInfo): 21
SegmentGenerator (org.apache.druid.segment.generator.SegmentGenerator): 12
DataSegment (org.apache.druid.timeline.DataSegment): 12
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 12
Setup (org.openjdk.jmh.annotations.Setup): 9
ArrayList (java.util.ArrayList): 7
LinkedHashMap (java.util.LinkedHashMap): 7
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 7
QuerySegmentSpec (org.apache.druid.query.spec.QuerySegmentSpec): 7
List (java.util.List): 6
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 6
DoubleSumAggregatorFactory (org.apache.druid.query.aggregation.DoubleSumAggregatorFactory): 5
QueryableIndex (org.apache.druid.segment.QueryableIndex): 5
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 4
DoubleMinAggregatorFactory (org.apache.druid.query.aggregation.DoubleMinAggregatorFactory): 4
PlannerConfig (org.apache.druid.sql.calcite.planner.PlannerConfig): 4
PlannerFactory (org.apache.druid.sql.calcite.planner.PlannerFactory): 4
DruidSchemaCatalog (org.apache.druid.sql.calcite.schema.DruidSchemaCatalog): 4
SpecificSegmentsQuerySegmentWalker (org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker): 4
QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate): 3