Example 1 with BenchmarkSchemaInfo

Use of io.druid.benchmark.datagen.BenchmarkSchemaInfo in project druid by druid-io.

From the class GroupByTypeInterfaceBenchmark, method setupQueries:

private void setupQueries() {
    // queries for the basic schema
    Map<String, GroupByQuery> basicQueries = new LinkedHashMap<>();
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    {
        // basic.A
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
        GroupByQuery queryString = GroupByQuery.builder()
                .setDataSource("blah")
                .setQuerySegmentSpec(intervalSpec)
                .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("dimSequential", null)))
                .setAggregatorSpecs(queryAggs)
                .setGranularity(Granularity.fromString(queryGranularity))
                .build();
        GroupByQuery queryLongFloat = GroupByQuery.builder()
                .setDataSource("blah")
                .setQuerySegmentSpec(intervalSpec)
                .setDimensions(Lists.<DimensionSpec>newArrayList(
                        new DefaultDimensionSpec("metLongUniform", null),
                        new DefaultDimensionSpec("metFloatNormal", null)))
                .setAggregatorSpecs(queryAggs)
                .setGranularity(Granularity.fromString(queryGranularity))
                .build();
        GroupByQuery queryLong = GroupByQuery.builder()
                .setDataSource("blah")
                .setQuerySegmentSpec(intervalSpec)
                .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("metLongUniform", null)))
                .setAggregatorSpecs(queryAggs)
                .setGranularity(Granularity.fromString(queryGranularity))
                .build();
        GroupByQuery queryFloat = GroupByQuery.builder()
                .setDataSource("blah")
                .setQuerySegmentSpec(intervalSpec)
                .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("metFloatNormal", null)))
                .setAggregatorSpecs(queryAggs)
                .setGranularity(Granularity.fromString(queryGranularity))
                .build();
        basicQueries.put("string", queryString);
        basicQueries.put("longFloat", queryLongFloat);
        basicQueries.put("long", queryLong);
        basicQueries.put("float", queryFloat);
    }
    {
        // basic.nested
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
        GroupByQuery subqueryA = GroupByQuery.builder()
                .setDataSource("blah")
                .setQuerySegmentSpec(intervalSpec)
                .setDimensions(Lists.<DimensionSpec>newArrayList(
                        new DefaultDimensionSpec("dimSequential", null),
                        new DefaultDimensionSpec("dimZipf", null)))
                .setAggregatorSpecs(queryAggs)
                .setGranularity(Granularities.DAY)
                .build();
        GroupByQuery queryA = GroupByQuery.builder()
                .setDataSource(subqueryA)
                .setQuerySegmentSpec(intervalSpec)
                .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("dimSequential", null)))
                .setAggregatorSpecs(queryAggs)
                .setGranularity(Granularities.WEEK)
                .build();
        basicQueries.put("nested", queryA);
    }
    SCHEMA_QUERY_MAP.put("basic", basicQueries);
}
Also used : GroupByQuery(io.druid.query.groupby.GroupByQuery) BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) QuerySegmentSpec(io.druid.query.spec.QuerySegmentSpec) List(java.util.List) ArrayList(java.util.ArrayList) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) LinkedHashMap(java.util.LinkedHashMap)
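
Each variant is registered under a type key in the per-schema map, so a benchmark method can pull out a specific query by schema name and key. A minimal lookup sketch, assuming SCHEMA_QUERY_MAP maps schema name to the query map built above (the surrounding JMH plumbing is omitted):

    // "basic" and "string" are the exact keys used in setupQueries() above.
    GroupByQuery query = SCHEMA_QUERY_MAP.get("basic").get("string");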

Example 2 with BenchmarkSchemaInfo

Use of io.druid.benchmark.datagen.BenchmarkSchemaInfo in project druid by druid-io.

From the class SelectBenchmark, method setupQueries:

private void setupQueries() {
    // queries for the basic schema
    Map<String, Druids.SelectQueryBuilder> basicQueries = new LinkedHashMap<>();
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    {
        // basic.A
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        Druids.SelectQueryBuilder queryBuilderA = Druids.newSelectQueryBuilder()
                .dataSource(new TableDataSource("blah"))
                .dimensionSpecs(DefaultDimensionSpec.toSpec(Arrays.<String>asList()))
                .metrics(Arrays.<String>asList())
                .intervals(intervalSpec)
                .granularity(Granularities.ALL)
                .descending(false);
        basicQueries.put("A", queryBuilderA);
    }
    SCHEMA_QUERY_MAP.put("basic", basicQueries);
}
Also used : TableDataSource(io.druid.query.TableDataSource) BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) QuerySegmentSpec(io.druid.query.spec.QuerySegmentSpec) LinkedHashMap(java.util.LinkedHashMap)
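
Note that what is stored here is a builder rather than a finished query, since select queries are paged and the paging state is supplied at query time. A hedged sketch of completing it, assuming the PagingSpec(pagingIdentifiers, threshold) constructor from io.druid.query.select; the threshold value is illustrative:

    // Attach a paging spec and build; 1000 is an arbitrary illustrative threshold.
    SelectQuery query = SCHEMA_QUERY_MAP.get("basic").get("A")
            .pagingSpec(new PagingSpec(null, 1000))
            .build();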

Example 3 with BenchmarkSchemaInfo

Use of io.druid.benchmark.datagen.BenchmarkSchemaInfo in project druid by druid-io.

From the class SqlBenchmark, method setup:

@Setup(Level.Trial)
public void setup() throws Exception {
    tmpDir = Files.createTempDir();
    log.info("Starting benchmark setup using tmpDir[%s], rows[%,d].", tmpDir, rowsPerSegment);
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    final BenchmarkSchemaInfo schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);
    final List<InputRow> rows = Lists.newArrayList();
    for (int i = 0; i < rowsPerSegment; i++) {
        final InputRow row = dataGenerator.nextRow();
        if (i % 20000 == 0) {
            log.info("%,d/%,d rows generated.", i, rowsPerSegment);
        }
        rows.add(row);
    }
    log.info("%,d/%,d rows generated.", rows.size(), rowsPerSegment);
    final PlannerConfig plannerConfig = new PlannerConfig();
    final QueryRunnerFactoryConglomerate conglomerate = CalciteTests.queryRunnerFactoryConglomerate();
    final QueryableIndex index = IndexBuilder.create()
            .tmpDir(new File(tmpDir, "1"))
            .indexMerger(TestHelper.getTestIndexMergerV9())
            .rows(rows)
            .buildMMappedIndex();
    this.walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
            DataSegment.builder()
                    .dataSource("foo")
                    .interval(index.getDataInterval())
                    .version("1")
                    .shardSpec(new LinearShardSpec(0))
                    .build(),
            index
    );
    final Map<String, Table> tableMap = ImmutableMap.<String, Table>of(
            "foo",
            new DruidTable(
                    new TableDataSource("foo"),
                    RowSignature.builder()
                            .add("__time", ValueType.LONG)
                            .add("dimSequential", ValueType.STRING)
                            .add("dimZipf", ValueType.STRING)
                            .add("dimUniform", ValueType.STRING)
                            .build()
            )
    );
    final Schema druidSchema = new AbstractSchema() {

        @Override
        protected Map<String, Table> getTableMap() {
            return tableMap;
        }
    };
    plannerFactory = new PlannerFactory(Calcites.createRootSchema(druidSchema), walker, CalciteTests.createOperatorTable(), plannerConfig);
    groupByQuery = GroupByQuery.builder()
            .setDataSource("foo")
            .setInterval(new Interval(JodaUtils.MIN_INSTANT, JodaUtils.MAX_INSTANT))
            .setDimensions(Arrays.<DimensionSpec>asList(
                    new DefaultDimensionSpec("dimZipf", "d0"),
                    new DefaultDimensionSpec("dimSequential", "d1")))
            .setAggregatorSpecs(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("c")))
            .setGranularity(Granularities.ALL)
            .build();
    sqlQuery = "SELECT\n"
            + "  dimZipf AS d0,\n"
            + "  dimSequential AS d1,\n"
            + "  COUNT(*) AS c\n"
            + "FROM druid.foo\n"
            + "GROUP BY dimZipf, dimSequential";
}
Also used : DruidTable(io.druid.sql.calcite.table.DruidTable) Table(org.apache.calcite.schema.Table) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) Schema(org.apache.calcite.schema.Schema) AbstractSchema(org.apache.calcite.schema.impl.AbstractSchema) BenchmarkDataGenerator(io.druid.benchmark.datagen.BenchmarkDataGenerator) HyperUniquesSerde(io.druid.query.aggregation.hyperloglog.HyperUniquesSerde) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) QueryRunnerFactoryConglomerate(io.druid.query.QueryRunnerFactoryConglomerate) SpecificSegmentsQuerySegmentWalker(io.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) TableDataSource(io.druid.query.TableDataSource) QueryableIndex(io.druid.segment.QueryableIndex) BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) PlannerConfig(io.druid.sql.calcite.planner.PlannerConfig) InputRow(io.druid.data.input.InputRow) PlannerFactory(io.druid.sql.calcite.planner.PlannerFactory) File(java.io.File) Interval(org.joda.time.Interval) Setup(org.openjdk.jmh.annotations.Setup)
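
With the segment walker and planner factory wired up, the benchmark can run the SQL text against the same data as the native groupByQuery. A minimal sketch of planning and executing sqlQuery, assuming the DruidPlanner and PlannerResult types from io.druid.sql.calcite.planner (exception handling omitted):

    // Plan the SQL, then run the resulting query; results arrive as a lazy Sequence.
    try (final DruidPlanner planner = plannerFactory.createPlanner(null)) {
        final PlannerResult plannerResult = planner.plan(sqlQuery);
        final List<Object[]> results =
                Sequences.toList(plannerResult.run(), Lists.<Object[]>newArrayList());
    }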

Example 4 with BenchmarkSchemaInfo

Use of io.druid.benchmark.datagen.BenchmarkSchemaInfo in project druid by druid-io.

From the class TimeseriesBenchmark, method setupQueries:

private void setupQueries() {
    // queries for the basic schema
    Map<String, TimeseriesQuery> basicQueries = new LinkedHashMap<>();
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    {
        // basic.A
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        queryAggs.add(new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential"));
        queryAggs.add(new LongMaxAggregatorFactory("maxLongUniform", "maxLongUniform"));
        queryAggs.add(new DoubleSumAggregatorFactory("sumFloatNormal", "sumFloatNormal"));
        queryAggs.add(new DoubleMinAggregatorFactory("minFloatZipf", "minFloatZipf"));
        queryAggs.add(new HyperUniquesAggregatorFactory("hyperUniquesMet", "hyper"));
        TimeseriesQuery queryA = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false)
                .build();
        basicQueries.put("A", queryA);
    }
    {
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        BoundDimFilter timeFilter = new BoundDimFilter(Column.TIME_COLUMN_NAME, "200000", "300000", false, false, null, null, StringComparators.NUMERIC);
        queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false)
                .build();
        basicQueries.put("timeFilterNumeric", timeFilterQuery);
    }
    {
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        BoundDimFilter timeFilter = new BoundDimFilter(Column.TIME_COLUMN_NAME, "200000", "300000", false, false, null, null, StringComparators.ALPHANUMERIC);
        queryAggs.add(new FilteredAggregatorFactory(lsaf, timeFilter));
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false)
                .build();
        basicQueries.put("timeFilterAlphanumeric", timeFilterQuery);
    }
    {
        QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(new Interval(200000, 300000)));
        List<AggregatorFactory> queryAggs = new ArrayList<>();
        LongSumAggregatorFactory lsaf = new LongSumAggregatorFactory("sumLongSequential", "sumLongSequential");
        queryAggs.add(lsaf);
        TimeseriesQuery timeFilterQuery = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false)
                .build();
        basicQueries.put("timeFilterByInterval", timeFilterQuery);
    }
    SCHEMA_QUERY_MAP.put("basic", basicQueries);
}
Also used : FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) BoundDimFilter(io.druid.query.filter.BoundDimFilter) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) DoubleMinAggregatorFactory(io.druid.query.aggregation.DoubleMinAggregatorFactory) LinkedHashMap(java.util.LinkedHashMap) BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) HyperUniquesAggregatorFactory(io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) QuerySegmentSpec(io.druid.query.spec.QuerySegmentSpec) List(java.util.List) ArrayList(java.util.ArrayList) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) Interval(org.joda.time.Interval)
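
The timeFilterNumeric and timeFilterAlphanumeric variants bound the same __time range but pay for different string comparators, which is what the benchmark isolates. A small illustration of how comparator choice changes the ordering of numeric strings, using constants from io.druid.query.ordering.StringComparators (values are illustrative):

    // NUMERIC parses the whole string as a number: 99 < 100, so this is negative.
    StringComparators.NUMERIC.compare("99", "100");
    // LEXICOGRAPHIC compares characters: '9' > '1', so "99" > "100" and this is positive.
    StringComparators.LEXICOGRAPHIC.compare("99", "100");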

Example 5 with BenchmarkSchemaInfo

Use of io.druid.benchmark.datagen.BenchmarkSchemaInfo in project druid by druid-io.

From the class SearchBenchmark, method setupQueries:

private void setupQueries() {
    // queries for the basic schema
    final Map<String, SearchQueryBuilder> basicQueries = new LinkedHashMap<>();
    final BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    final List<String> queryTypes = ImmutableList.of("A", "B", "C", "D");
    for (final String eachType : queryTypes) {
        basicQueries.put(eachType, makeQuery(eachType, basicSchema));
    }
    SCHEMA_QUERY_MAP.put("basic", basicQueries);
}
Also used : BenchmarkSchemaInfo(io.druid.benchmark.datagen.BenchmarkSchemaInfo) SearchQueryBuilder(io.druid.query.Druids.SearchQueryBuilder) LinkedHashMap(java.util.LinkedHashMap)
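
The per-type query construction is delegated to makeQuery, which this excerpt does not reproduce. Purely as a hypothetical illustration of the kind of builder such a method returns (the real implementation differs per query type, and the search term here is made up):

    // Hypothetical sketch only: makeQuery's actual body is not shown above.
    Druids.SearchQueryBuilder builder = Druids.newSearchQueryBuilder()
            .dataSource("blah")
            .granularity(Granularities.ALL)
            .intervals(new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval())))
            .query("123");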

Aggregations

BenchmarkSchemaInfo (io.druid.benchmark.datagen.BenchmarkSchemaInfo): 9 usages
MultipleIntervalSegmentSpec (io.druid.query.spec.MultipleIntervalSegmentSpec): 7 usages
QuerySegmentSpec (io.druid.query.spec.QuerySegmentSpec): 7 usages
LinkedHashMap (java.util.LinkedHashMap): 7 usages
ArrayList (java.util.ArrayList): 6 usages
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 5 usages
List (java.util.List): 5 usages
DoubleMinAggregatorFactory (io.druid.query.aggregation.DoubleMinAggregatorFactory): 3 usages
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory): 3 usages
LongMaxAggregatorFactory (io.druid.query.aggregation.LongMaxAggregatorFactory): 3 usages
HyperUniquesAggregatorFactory (io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory): 3 usages
DefaultDimensionSpec (io.druid.query.dimension.DefaultDimensionSpec): 3 usages
BenchmarkDataGenerator (io.druid.benchmark.datagen.BenchmarkDataGenerator): 2 usages
InputRow (io.druid.data.input.InputRow): 2 usages
TableDataSource (io.druid.query.TableDataSource): 2 usages
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 2 usages
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 2 usages
FilteredAggregatorFactory (io.druid.query.aggregation.FilteredAggregatorFactory): 2 usages
HyperUniquesSerde (io.druid.query.aggregation.hyperloglog.HyperUniquesSerde): 2 usages
BoundDimFilter (io.druid.query.filter.BoundDimFilter): 2 usages