Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.
From class TimeseriesBenchmark, method setup():
/**
 * Set up everything common to benchmarking both the incremental index and the queryable index.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  setupQueries();
  // The schemaAndQuery benchmark parameter has the form "<schemaName>.<queryName>".
  String[] schemaQuery = schemaAndQuery.split("\\.");
  String schemaName = schemaQuery[0];
  String queryName = schemaQuery[1];
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
  generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
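The split on "." above implies a single JMH parameter that packs both the schema name and the query name into one string. A minimal sketch of how that parameter might be declared, assuming JMH's @Param annotation; the listed values are illustrative, not necessarily the benchmark's actual set:

// Hypothetical @Param declaration matching the "<schemaName>.<queryName>"
// format parsed in setup(); the values shown are assumptions for illustration.
@Param({"basic.A", "basic.timeFilterNumeric"})
private String schemaAndQuery;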
Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.
From class TopNBenchmark, method setup():
/**
 * Set up everything common to benchmarking both the incremental index and the queryable index.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  setupQueries();
  // The schemaAndQuery benchmark parameter has the form "<schemaName>.<queryName>".
  String[] schemaQuery = schemaAndQuery.split("\\.");
  String schemaName = schemaQuery[0];
  String queryName = schemaQuery[1];
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  queryBuilder = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);
  queryBuilder.threshold(threshold);
  query = queryBuilder.build();
  generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
  factory = new TopNQueryRunnerFactory(
      new StupidPool<>(
          "TopNBenchmark-compute-bufferPool",
          new OffheapBufferGenerator("compute", 250000000),
          0,
          Integer.MAX_VALUE
      ),
      new TopNQueryQueryToolChest(new TopNQueryConfig()),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
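Unlike the timeseries case, SCHEMA_QUERY_MAP here holds a TopNQueryBuilder rather than a finished query, so the per-run threshold parameter can be applied before build(). A minimal sketch of what one builder entry might look like; the dimension and metric names are assumptions, not the benchmark's actual values:

// Hedged sketch of a SCHEMA_QUERY_MAP entry; dimension and metric names are assumed.
QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(
    Collections.singletonList(schemaInfo.getDataInterval()));
TopNQueryBuilder builder = new TopNQueryBuilder()
    .dataSource("blah")
    .granularity(Granularities.ALL)
    .dimension("dimSequential")   // assumed dimension from the generator schema
    .metric("rows")               // assumed metric to rank on
    .intervals(intervalSpec)
    .aggregators(Collections.singletonList(new CountAggregatorFactory("rows")));
// setup() then applies the run's threshold: builder.threshold(threshold).build()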
Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.
From class IndexIngestionBenchmark, method setup():
@Setup
public void setup() throws JsonProcessingException {
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
  // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
  // It is used in {@code makeIncIndex()} to instantiate an incremental index of the specified type.
  appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);
  DataGenerator gen = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval().getStartMillis(),
      IndexPersistBenchmark.getValuesPerTimestamp(rollupOpportunity),
      1000.0
  );
  rows = gen.toList(rowsPerSegment);
}
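setup() only prepares the input rows; the ingestion under test happens in a benchmark method. A minimal sketch of how such a method might consume the prepared rows, assuming a makeIncIndex() helper that builds an IncrementalIndex from appendableIndexSpec, and using JMH's Blackhole to keep the work alive:

// Hedged sketch of the measured ingestion loop; makeIncIndex() is assumed to
// create an IncrementalIndex of the type selected by appendableIndexSpec.
@Benchmark
public void addRows(Blackhole blackhole) throws Exception {
  IncrementalIndex incIndex = makeIncIndex();
  for (InputRow row : rows) {
    // add() returns an IncrementalIndexAddResult; consuming it prevents dead-code elimination.
    blackhole.consume(incIndex.add(row));
  }
}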
Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.
From class IndexPersistBenchmark, method setup():
@Setup
public void setup() throws JsonProcessingException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
  // Creates an AppendableIndexSpec that corresponds to the indexType parametrization.
  // It is used in {@code makeIncIndex()} to instantiate an incremental index of the specified type.
  appendableIndexSpec = IncrementalIndexCreator.parseIndexType(indexType);
  DataGenerator gen = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval().getStartMillis(),
      getValuesPerTimestamp(rollupOpportunity),
      1000.0
  );
  rows = gen.toList(rowsPerSegment);
}
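Here the generated rows feed a persist benchmark: the rows are first loaded into an in-memory incremental index, and the measured step writes that index to disk as a segment. A minimal sketch of that step, assuming the class holds an IndexMergerV9 instance and a makeIncIndex() helper; the temp-directory handling is simplified:

// Hedged sketch of the measured persist; indexMergerV9 and makeIncIndex()
// are assumed members of the benchmark class.
@Benchmark
public void persistV9(Blackhole blackhole) throws Exception {
  IncrementalIndex incIndex = makeIncIndex();
  for (InputRow row : rows) {
    incIndex.add(row);
  }
  File tmpDir = FileUtils.createTempDir();
  try {
    // persist() returns the directory the segment was written to.
    blackhole.consume(indexMergerV9.persist(incIndex, tmpDir, new IndexSpec(), null));
  } finally {
    org.apache.commons.io.FileUtils.deleteDirectory(tmpDir);
  }
}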
Use of org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde in project druid by druid-io.
From class FilteredAggregatorBenchmark, method setup():
/**
 * Set up everything common to benchmarking both the incremental index and the queryable index.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schema);
  generator = new DataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
  // A disjunction of four filter types (bound, regex, search, in) over the dimSequential dimension.
  filter = new OrDimFilter(Arrays.asList(
      new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
      new RegexDimFilter("dimSequential", "X", null),
      new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
      new InDimFilter("dimSequential", Collections.singletonList("X"), null)
  ));
  filteredMetric = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
  GeneratorSchemaInfo basicSchema = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
  List<AggregatorFactory> queryAggs = Collections.singletonList(filteredMetric);
  query = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(descending)
                .build();
}
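A hedged sketch of how a query assembled this way is typically executed in these benchmarks, assuming a QueryBenchmarkUtil.makeQueryRunner helper and an already-built QueryableIndex named qIndex; the segment id and variable names are illustrative:

// Hedged sketch of running the timeseries query against a queryable index;
// qIndex and the makeQueryRunner helper are assumptions.
QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
    factory,
    SegmentId.dummy("qIndex"),
    new QueryableIndexSegment(qIndex, SegmentId.dummy("qIndex"))
);
List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();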