Example 6 with HyperUniquesSerde

Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.

Class TimeseriesBenchmark, method setup:

/**
 * Setup everything common for benchmarking both the incremental-index and the queryable-index.
 */
@Setup
public void setup() {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    setupQueries();
    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
    generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryBenchmarkUtil.NOOP_QUERYWATCHER);
}
Also used : TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine) TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) DataGenerator(org.apache.druid.segment.generator.DataGenerator) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) Setup(org.openjdk.jmh.annotations.Setup)
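
The queries fetched from SCHEMA_QUERY_MAP are built in setupQueries(), which is not shown in this example. As a hedged sketch, a timeseries query over the hyperUnique metric registered above might be built like this, using HyperUniquesAggregatorFactory (org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory); the metric column name "hyper" and the data source name "blah" are assumptions borrowed from the other examples on this page:

GeneratorSchemaInfo basicSchema = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource("blah")
    .granularity(Granularities.ALL)
    .intervals(intervalSpec)
    // "hyper" is an assumed hyperUnique column produced by the generator schema
    .aggregators(Collections.singletonList(new HyperUniquesAggregatorFactory("uniqueUsers", "hyper")))
    .descending(false)
    .build();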

Example 7 with HyperUniquesSerde

Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.

Class TopNBenchmark, method setup:

/**
 * Setup everything common for benchmarking both the incremental-index and the queryable-index.
 */
@Setup
public void setup() {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    setupQueries();
    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    queryBuilder = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
    queryBuilder.threshold(threshold);
    query = queryBuilder.build();
    generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    factory = new TopNQueryRunnerFactory(new StupidPool<>("TopNBenchmark-compute-bufferPool", new OffheapBufferGenerator("compute", 250000000), 0, Integer.MAX_VALUE), new TopNQueryQueryToolChest(new TopNQueryConfig()), QueryBenchmarkUtil.NOOP_QUERYWATCHER);
}
Also used : OffheapBufferGenerator(org.apache.druid.offheap.OffheapBufferGenerator) TopNQueryConfig(org.apache.druid.query.topn.TopNQueryConfig) DataGenerator(org.apache.druid.segment.generator.DataGenerator) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) TopNQueryRunnerFactory(org.apache.druid.query.topn.TopNQueryRunnerFactory) StupidPool(org.apache.druid.collections.StupidPool) TopNQueryQueryToolChest(org.apache.druid.query.topn.TopNQueryQueryToolChest) Setup(org.openjdk.jmh.annotations.Setup)
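
Here setupQueries() yields a TopNQueryBuilder rather than a finished query, so the benchmark can apply the threshold parameter before building. A hedged sketch of such a builder (TopNQueryBuilder is org.apache.druid.query.topn.TopNQueryBuilder; the dimension name "dimSequential" and data source "blah" are assumptions taken from the other examples on this page):

QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(schemaInfo.getDataInterval()));
TopNQueryBuilder queryBuilder = new TopNQueryBuilder()
    .dataSource("blah")
    .granularity(Granularities.ALL)
    // rank values of the assumed "dimSequential" dimension by plain row count
    .dimension("dimSequential")
    .metric("rows")
    .intervals(intervalSpec)
    .aggregators(Collections.singletonList(new CountAggregatorFactory("rows")));
TopNQuery query = queryBuilder.threshold(10).build();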

Example 8 with HyperUniquesSerde

Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.

Class IndexIngestionBenchmark, method setup:

@Setup
public void setup() throws JsonProcessingException {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
    // It is used in {@code makeIncIndex()} to instantiate an incremental-index of the specified type.
    appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);
    DataGenerator gen = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval().getStartMillis(), IndexPersistBenchmark.getValuesPerTimestamp(rollupOpportunity), 1000.0);
    rows = gen.toList(rowsPerSegment);
}
Also used : DataGenerator(org.apache.druid.segment.generator.DataGenerator) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) Setup(org.openjdk.jmh.annotations.Setup)
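
The rows generated in setup() feed the benchmark body. A hedged sketch of what that body might look like, assuming a makeIncIndex() helper that builds an IncrementalIndex from appendableIndexSpec (the helper is not part of the snippet above):

@Benchmark
public void addRows(Blackhole blackhole) throws Exception {
    // makeIncIndex() is an assumed helper that builds an incremental index from appendableIndexSpec
    IncrementalIndex incIndex = makeIncIndex();
    for (InputRow row : rows) {
        // consume the add result so the JIT cannot eliminate the work
        blackhole.consume(incIndex.add(row));
    }
}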

Example 9 with HyperUniquesSerde

Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.

Class IndexPersistBenchmark, method setup:

@Setup
public void setup() throws JsonProcessingException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
    // It is used in {@code makeIncIndex()} to instantiate an incremental-index of the specified type.
    appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);
    DataGenerator gen = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval().getStartMillis(), getValuesPerTimestamp(rollupOpportunity), 1000.0);
    rows = gen.toList(rowsPerSegment);
}
Also used : DataGenerator(org.apache.druid.segment.generator.DataGenerator) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) Setup(org.openjdk.jmh.annotations.Setup)
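
A hedged sketch of the persist step this benchmark measures, assuming a makeIncIndex() helper, an indexMergerV9 field, and the persist(index, outDir, indexSpec, segmentWriteOutMediumFactory) overload; none of these appear in the snippet above, so treat the names as assumptions:

@Benchmark
public void persistV9(Blackhole blackhole) throws Exception {
    IncrementalIndex incIndex = makeIncIndex();   // assumed helper
    for (InputRow row : rows) {
        incIndex.add(row);
    }
    File tmpDir = Files.createTempDirectory("IndexPersistBenchmark").toFile();
    try {
        // assumed IndexMergerV9 overload; a null SegmentWriteOutMediumFactory selects the default
        File outDir = indexMergerV9.persist(incIndex, tmpDir, new IndexSpec(), null);
        blackhole.consume(outDir);
    } finally {
        FileUtils.deleteDirectory(tmpDir);   // org.apache.commons.io.FileUtils cleanup
    }
}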

Example 10 with HyperUniquesSerde

Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.

Class FilteredAggregatorBenchmark, method setup:

/**
 * Setup everything common for benchmarking both the incremental-index and the queryable-index.
 */
@Setup
public void setup() {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    filter = new OrDimFilter(Arrays.asList(new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC), new RegexDimFilter("dimSequential", "X", null), new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null), new InDimFilter("dimSequential", Collections.singletonList("X"), null)));
    filteredMetric = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
    factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryBenchmarkUtil.NOOP_QUERYWATCHER);
    GeneratorSchemaInfo basicSchema = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
    QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
    List<AggregatorFactory> queryAggs = Collections.singletonList(filteredMetric);
    query = Druids.newTimeseriesQueryBuilder().dataSource("blah").granularity(Granularities.ALL).intervals(intervalSpec).aggregators(queryAggs).descending(descending).build();
}
Also used : FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) RegexDimFilter(org.apache.druid.query.filter.RegexDimFilter) BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter) ContainsSearchQuerySpec(org.apache.druid.query.search.ContainsSearchQuerySpec) GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine) TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) DataGenerator(org.apache.druid.segment.generator.DataGenerator) OrDimFilter(org.apache.druid.query.filter.OrDimFilter) InDimFilter(org.apache.druid.query.filter.InDimFilter) SearchQueryDimFilter(org.apache.druid.query.filter.SearchQueryDimFilter) QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec) Setup(org.openjdk.jmh.annotations.Setup)
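
The same filtered-aggregator pattern can wrap the hyperUnique metric registered at the top of setup(). A hedged sketch, where the column name "hyper" is an assumption about the generator schema and SelectorDimFilter is org.apache.druid.query.filter.SelectorDimFilter:

// counts distinct values of the assumed "hyper" column, but only over rows where dimSequential is "0"
AggregatorFactory filteredUniques = new FilteredAggregatorFactory(
    new HyperUniquesAggregatorFactory("uniqueUsers", "hyper"),
    new SelectorDimFilter("dimSequential", "0", null));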

Aggregations

HyperUniquesSerde (org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde): 19 uses
DataGenerator (org.apache.druid.segment.generator.DataGenerator): 14 uses
Setup (org.openjdk.jmh.annotations.Setup): 14 uses
InputRow (org.apache.druid.data.input.InputRow): 6 uses
IndexSpec (org.apache.druid.segment.IndexSpec): 6 uses
File (java.io.File): 5 uses
StupidPool (org.apache.druid.collections.StupidPool): 4 uses
OffheapBufferGenerator (org.apache.druid.offheap.OffheapBufferGenerator): 4 uses
QueryableIndex (org.apache.druid.segment.QueryableIndex): 4 uses
IncrementalIndex (org.apache.druid.segment.incremental.IncrementalIndex): 4 uses
OnheapIncrementalIndex (org.apache.druid.segment.incremental.OnheapIncrementalIndex): 4 uses
ArrayList (java.util.ArrayList): 3 uses
TopNQueryQueryToolChest (org.apache.druid.query.topn.TopNQueryQueryToolChest): 3 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2 uses
SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory): 2 uses
ByteBuffer (java.nio.ByteBuffer): 2 uses
DefaultBlockingPool (org.apache.druid.collections.DefaultBlockingPool): 2 uses
DefaultObjectMapper (org.apache.druid.jackson.DefaultObjectMapper): 2 uses
DruidProcessingConfig (org.apache.druid.query.DruidProcessingConfig): 2 uses
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 2 uses