
Example 6 with DataGenerator

Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.

From the class IndexPersistBenchmark, method setup().

@Setup
public void setup() throws JsonProcessingException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
    // It is used in {@code makeIncIndex()} to instantiate an incremental index of the specified type.
    appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);
    DataGenerator gen = new DataGenerator(
            schemaInfo.getColumnSchemas(),
            RNG_SEED,
            schemaInfo.getDataInterval().getStartMillis(),
            getValuesPerTimestamp(rollupOpportunity),
            1000.0
    );
    rows = gen.toList(rowsPerSegment);
}
Also used : DataGenerator(org.apache.druid.segment.generator.DataGenerator) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) Setup(org.openjdk.jmh.annotations.Setup)
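
For orientation, here is a minimal sketch (not taken from any of these benchmarks) contrasting the two DataGenerator constructors that appear across the examples: the start-millis form used above and the interval form used in Examples 7 through 10. The literal arguments 2 and 1000.0 are placeholders, and the interpretation of the last two parameters is an assumption, not verified against the Druid source.

// Sketch only; assumes schemaInfo, RNG_SEED, and rowsPerSegment are in scope,
// as in the benchmarks above.

// Interval form: rows are spread across the schema's data interval.
DataGenerator intervalGen = new DataGenerator(
        schemaInfo.getColumnSchemas(),
        RNG_SEED,
        schemaInfo.getDataInterval(),
        rowsPerSegment
);

// Start-millis form, as in Example 6: generation begins at a fixed timestamp.
// The fourth argument (placeholder 2 here) appears to control how many rows
// share each timestamp, i.e. the rollup opportunity, and the fifth (1000.0)
// the timestamp step; both readings are assumptions.
DataGenerator timestampGen = new DataGenerator(
        schemaInfo.getColumnSchemas(),
        RNG_SEED,
        schemaInfo.getDataInterval().getStartMillis(),
        2,
        1000.0
);
List<InputRow> rows = timestampGen.toList(rowsPerSegment);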

Example 7 with DataGenerator

Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.

From the class FilteredAggregatorBenchmark, method setup().

/**
 * Sets up everything common to benchmarking both the incremental index and the queryable index.
 */
@Setup
public void setup() {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    filter = new OrDimFilter(Arrays.asList(
            new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
            new RegexDimFilter("dimSequential", "X", null),
            new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
            new InDimFilter("dimSequential", Collections.singletonList("X"), null)
    ));
    filteredMetric = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
    factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryBenchmarkUtil.NOOP_QUERYWATCHER);
    GeneratorSchemaInfo basicSchema = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
    QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
    List<AggregatorFactory> queryAggs = Collections.singletonList(filteredMetric);
    query = Druids.newTimeseriesQueryBuilder()
                  .dataSource("blah")
                  .granularity(Granularities.ALL)
                  .intervals(intervalSpec)
                  .aggregators(queryAggs)
                  .descending(descending)
                  .build();
}
Also used : FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) RegexDimFilter(org.apache.druid.query.filter.RegexDimFilter) BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter) ContainsSearchQuerySpec(org.apache.druid.query.search.ContainsSearchQuerySpec) GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine) TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) DataGenerator(org.apache.druid.segment.generator.DataGenerator) OrDimFilter(org.apache.druid.query.filter.OrDimFilter) InDimFilter(org.apache.druid.query.filter.InDimFilter) SearchQueryDimFilter(org.apache.druid.query.filter.SearchQueryDimFilter) QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec) Setup(org.openjdk.jmh.annotations.Setup)
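
Note that this setup only constructs the generator; the rows are presumably drawn later in the per-benchmark methods. A minimal sketch of consuming it, mirroring the nextRow() loop in Example 8 below, assuming an incIndex built by the benchmark's makeIncIndex():

// Sketch only; generator and rowsPerSegment as defined in the setup above,
// incIndex assumed to come from the benchmark's makeIncIndex().
for (int i = 0; i < rowsPerSegment; i++) {
    InputRow row = generator.nextRow(); // draw the next synthetic row
    incIndex.add(row);                  // feed it into the incremental index
}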

Example 8 with DataGenerator

Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.

From the class FilterPartitionBenchmark, method setup().

@Setup
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    DataGenerator gen = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    incIndex = makeIncIndex();
    for (int j = 0; j < rowsPerSegment; j++) {
        InputRow row = gen.nextRow();
        if (j % 10000 == 0) {
            log.info(j + " rows generated.");
        }
        incIndex.add(row);
    }
    tmpDir = FileUtils.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec(), null);
    qIndex = INDEX_IO.loadIndex(indexFile);
    Interval interval = schemaInfo.getDataInterval();
    timeFilterNone = new BoundFilter(new BoundDimFilter(
            ColumnHolder.TIME_COLUMN_NAME,
            String.valueOf(Long.MAX_VALUE),
            String.valueOf(Long.MAX_VALUE),
            true, true, null, null,
            StringComparators.ALPHANUMERIC
    ));
    long halfEnd = (interval.getEndMillis() + interval.getStartMillis()) / 2;
    timeFilterHalf = new BoundFilter(new BoundDimFilter(
            ColumnHolder.TIME_COLUMN_NAME,
            String.valueOf(interval.getStartMillis()),
            String.valueOf(halfEnd),
            true, true, null, null,
            StringComparators.ALPHANUMERIC
    ));
    timeFilterAll = new BoundFilter(new BoundDimFilter(
            ColumnHolder.TIME_COLUMN_NAME,
            String.valueOf(interval.getStartMillis()),
            String.valueOf(interval.getEndMillis()),
            true, true, null, null,
            StringComparators.ALPHANUMERIC
    ));
}
Also used : IndexSpec(org.apache.druid.segment.IndexSpec) BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter) BoundFilter(org.apache.druid.segment.filter.BoundFilter) DataGenerator(org.apache.druid.segment.generator.DataGenerator) InputRow(org.apache.druid.data.input.InputRow) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) Interval(org.joda.time.Interval) Setup(org.openjdk.jmh.annotations.Setup)
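
The three BoundDimFilter constructions above differ only in their bounds. A hypothetical helper (not in the benchmark) that factors out the shared arguments could read:

// Hypothetical helper; passes the same positional arguments as the three
// filters above, varying only the lower and upper bounds.
private static BoundFilter timeBound(String lower, String upper) {
    return new BoundFilter(new BoundDimFilter(
            ColumnHolder.TIME_COLUMN_NAME,
            lower,
            upper,
            true, true,   // strict bounds, as in the benchmark
            null, null,   // remaining optional arguments, passed through unchanged
            StringComparators.ALPHANUMERIC
    ));
}

With it, the half-interval filter would read: timeFilterHalf = timeBound(String.valueOf(interval.getStartMillis()), String.valueOf(halfEnd));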

Example 9 with DataGenerator

Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.

From the class GroupByTypeInterfaceBenchmark, method setup().

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT %d", System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    setupQueries();
    String schemaName = "basic";
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    stringQuery = SCHEMA_QUERY_MAP.get(schemaName).get("string");
    longFloatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("longFloat");
    longQuery = SCHEMA_QUERY_MAP.get(schemaName).get("long");
    floatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("float");
    final DataGenerator dataGenerator = new DataGenerator(
            schemaInfo.getColumnSchemas(),
            RNG_SEED + 1,
            schemaInfo.getDataInterval(),
            rowsPerSegment
    );
    tmpDir = FileUtils.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());
    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);
    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);
        final IncrementalIndex index = makeIncIndex();
        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }
        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment, rowsPerSegment * numSegments, i + 1, numSegments);
        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec(), null);
        queryableIndexes.add(INDEX_IO.loadIndex(file));
        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }
    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>(
            "GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 250_000_000),
            0,
            Integer.MAX_VALUE
    );
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 250_000_000),
            2
    );
    final GroupByQueryConfig config = new GroupByQueryConfig() {

        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);
    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {

        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };
    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
            configSupplier,
            new GroupByStrategyV1(
                    configSupplier,
                    new GroupByQueryEngine(configSupplier, bufferPool),
                    QueryBenchmarkUtil.NOOP_QUERYWATCHER
            ),
            new GroupByStrategyV2(
                    druidProcessingConfig,
                    configSupplier,
                    bufferPool,
                    mergePool,
                    new ObjectMapper(new SmileFactory()),
                    QueryBenchmarkUtil.NOOP_QUERYWATCHER
            )
    );
    factory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector));
}
Also used : GroupByStrategySelector(org.apache.druid.query.groupby.strategy.GroupByStrategySelector) IndexSpec(org.apache.druid.segment.IndexSpec) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) GroupByQueryQueryToolChest(org.apache.druid.query.groupby.GroupByQueryQueryToolChest) GroupByStrategyV1(org.apache.druid.query.groupby.strategy.GroupByStrategyV1) GroupByStrategyV2(org.apache.druid.query.groupby.strategy.GroupByStrategyV2) DefaultBlockingPool(org.apache.druid.collections.DefaultBlockingPool) GroupByQueryEngine(org.apache.druid.query.groupby.GroupByQueryEngine) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) GroupByQueryRunnerFactory(org.apache.druid.query.groupby.GroupByQueryRunnerFactory) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) ByteBuffer(java.nio.ByteBuffer) SmileFactory(com.fasterxml.jackson.dataformat.smile.SmileFactory) OffheapBufferGenerator(org.apache.druid.offheap.OffheapBufferGenerator) DataGenerator(org.apache.druid.segment.generator.DataGenerator) InputRow(org.apache.druid.data.input.InputRow) StupidPool(org.apache.druid.collections.StupidPool) DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig) File(java.io.File) Setup(org.openjdk.jmh.annotations.Setup)
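
A usage note, offered as an assumption rather than something shown in this benchmark: both pool types hand out buffers wrapped in resource holders, and closing the holder returns the buffer to the pool, which is why a merge-pool limit of 2 suffices for the simulated historical-plus-broker merge. A minimal sketch:

// Sketch only; assumes the NonBlockingPool/ResourceHolder pattern used
// elsewhere in Druid, not code from this benchmark.
try (ResourceHolder<ByteBuffer> holder = bufferPool.take()) {
    ByteBuffer buf = holder.get();
    // ... use buf as grouping scratch space ...
} // closing the holder returns the buffer to the pool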

Example 10 with DataGenerator

Use of org.apache.druid.segment.generator.DataGenerator in project druid by druid-io.

From the class IncrementalIndexReadBenchmark, method setup().

@Setup
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + +System.currentTimeMillis());
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
    // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
    // It is used in {@code makeIncIndex()} to instantiate an incremental index of the specified type.
    appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);
    DataGenerator gen = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    incIndex = makeIncIndex();
    gen.addToIndex(incIndex, rowsPerSegment);
}
Also used : DataGenerator(org.apache.druid.segment.generator.DataGenerator) HyperUniquesSerde(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde) Setup(org.openjdk.jmh.annotations.Setup)
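
Note that gen.addToIndex(incIndex, rowsPerSegment) is the convenience form of the manual loop in Example 8; the two setups do the same work. A side-by-side sketch:

// Example 8 style: explicit loop over nextRow().
for (int j = 0; j < rowsPerSegment; j++) {
    incIndex.add(gen.nextRow());
}

// Example 10 style: one call does the same.
gen.addToIndex(incIndex, rowsPerSegment);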

Aggregations

DataGenerator (org.apache.druid.segment.generator.DataGenerator): 15 usages
HyperUniquesSerde (org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde): 14 usages
Setup (org.openjdk.jmh.annotations.Setup): 14 usages
InputRow (org.apache.druid.data.input.InputRow): 5 usages
IndexSpec (org.apache.druid.segment.IndexSpec): 5 usages
File (java.io.File): 4 usages
StupidPool (org.apache.druid.collections.StupidPool): 4 usages
OffheapBufferGenerator (org.apache.druid.offheap.OffheapBufferGenerator): 4 usages
IncrementalIndex (org.apache.druid.segment.incremental.IncrementalIndex): 4 usages
OnheapIncrementalIndex (org.apache.druid.segment.incremental.OnheapIncrementalIndex): 4 usages
TimeseriesQueryQueryToolChest (org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest): 3 usages
TopNQueryQueryToolChest (org.apache.druid.query.topn.TopNQueryQueryToolChest): 3 usages
QueryableIndex (org.apache.druid.segment.QueryableIndex): 3 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2 usages
SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory): 2 usages
ByteBuffer (java.nio.ByteBuffer): 2 usages
DefaultBlockingPool (org.apache.druid.collections.DefaultBlockingPool): 2 usages
DefaultObjectMapper (org.apache.druid.jackson.DefaultObjectMapper): 2 usages
DruidProcessingConfig (org.apache.druid.query.DruidProcessingConfig): 2 usages
BoundDimFilter (org.apache.druid.query.filter.BoundDimFilter): 2 usages