Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io.
The class IndexPersistBenchmark, method setup:
@Setup
public void setup() throws IOException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }
  rows = new ArrayList<InputRow>();
  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
  BenchmarkDataGenerator gen = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
  for (int i = 0; i < rowsPerSegment; i++) {
    InputRow row = gen.nextRow();
    if (i % 10000 == 0) {
      log.info(i + " rows generated.");
    }
    rows.add(row);
  }
}
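With no argument, @Setup runs at Level.Trial, i.e. once before the whole run of iterations; the companion setup2 below uses Level.Iteration so each measured iteration starts from a fresh incremental index. A minimal sketch of the three JMH setup levels (the class and method names are illustrative):

@State(Scope.Benchmark)
public class LifecycleExample {
  @Setup(Level.Trial) // once per fork, before any iterations run
  public void perTrial() {}

  @Setup(Level.Iteration) // before each warmup or measurement iteration
  public void perIteration() {}

  @Setup(Level.Invocation) // before every single benchmark method call; use sparingly
  public void perInvocation() {}
}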
The class IndexPersistBenchmark, method setup2:
@Setup(Level.Iteration)
public void setup2() throws IOException {
  incIndex = makeIncIndex();
  for (int i = 0; i < rowsPerSegment; i++) {
    InputRow row = rows.get(i);
    incIndex.add(row);
  }
}
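Because setup2 rebuilds incIndex before every iteration, a benchmark method can destructively persist it without polluting the next measurement. A hedged sketch of such a method, reusing the persist call shown in the next example (the method name, the tmpDir field, and the Blackhole plumbing are assumptions, not part of the original class):

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void persistV9(Blackhole blackhole) throws IOException {
  // Persist the incremental index built in setup2(); tmpDir is an assumed File field.
  File indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
  // Hand the result to the Blackhole so dead-code elimination cannot skip the work.
  blackhole.consume(indexFile);
}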
The class TopNTypeInterfaceBenchmark, method setup:
@Setup
public void setup() throws IOException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }
  executorService = Execs.multiThreaded(numSegments, "TopNThreadPool");
  setupQueries();
  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get("basic");
  queryBuilder = SCHEMA_QUERY_MAP.get("basic").get("string");
  queryBuilder.threshold(threshold);
  stringQuery = queryBuilder.build();
  TopNQueryBuilder longBuilder = SCHEMA_QUERY_MAP.get("basic").get("long");
  longBuilder.threshold(threshold);
  longQuery = longBuilder.build();
  TopNQueryBuilder floatBuilder = SCHEMA_QUERY_MAP.get("basic").get("float");
  floatBuilder.threshold(threshold);
  floatQuery = floatBuilder.build();
  incIndexes = new ArrayList<>();
  for (int i = 0; i < numSegments; i++) {
    log.info("Generating rows for segment " + i);
    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + i, schemaInfo.getDataInterval(), rowsPerSegment);
    IncrementalIndex incIndex = makeIncIndex();
    for (int j = 0; j < rowsPerSegment; j++) {
      InputRow row = gen.nextRow();
      if (j % 10000 == 0) {
        log.info(j + " rows generated.");
      }
      incIndex.add(row);
    }
    incIndexes.add(incIndex);
  }
  File tmpFile = Files.createTempDir();
  log.info("Using temp dir: " + tmpFile.getAbsolutePath());
  tmpFile.deleteOnExit();
  qIndexes = new ArrayList<>();
  for (int i = 0; i < numSegments; i++) {
    File indexFile = INDEX_MERGER_V9.persist(incIndexes.get(i), tmpFile, new IndexSpec());
    QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
    qIndexes.add(qIndex);
  }
  factory = new TopNQueryRunnerFactory(
      new StupidPool<>("TopNBenchmark-compute-bufferPool", new OffheapBufferGenerator("compute", 250000000), 0, Integer.MAX_VALUE),
      new TopNQueryQueryToolChest(new TopNQueryConfig(), QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
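With the per-type queries and the queryable indexes prepared, a benchmark method would push one of them through the factory. A hedged sketch assuming the QueryRunner API of the same Druid era (the helper name and the "qIndex" segment identifier are illustrative, not from the original class):

private List<Result<TopNResultValue>> runQuery(TopNQuery query, QueryableIndex index) {
  // Wrap the mmapped index in a Segment and get a runner from the factory built in setup().
  QueryRunner<Result<TopNResultValue>> runner = factory.createRunner(new QueryableIndexSegment("qIndex", index));
  // Runners of this era took the query plus a mutable response context map.
  Sequence<Result<TopNResultValue>> results = runner.run(query, new HashMap<String, Object>());
  return Sequences.toList(results, Lists.<Result<TopNResultValue>>newArrayList());
}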
The class BoundFilterBenchmark, method setup:
@Setup
public void setup() throws IOException {
  step = (END_INT - START_INT) / cardinality;
  final BitmapFactory bitmapFactory = new RoaringBitmapFactory();
  final BitmapSerdeFactory serdeFactory = new RoaringBitmapSerdeFactory(null);
  final List<Integer> ints = generateInts();
  final GenericIndexed<String> dictionary = GenericIndexed.fromIterable(
      FluentIterable.from(ints).transform(new Function<Integer, String>() {
        @Override
        public String apply(Integer i) {
          return i.toString();
        }
      }),
      GenericIndexed.STRING_STRATEGY
  );
  final BitmapIndex bitmapIndex = new BitmapIndexColumnPartSupplier(
      bitmapFactory,
      GenericIndexed.fromIterable(
          FluentIterable.from(ints).transform(new Function<Integer, ImmutableBitmap>() {
            @Override
            public ImmutableBitmap apply(Integer i) {
              final MutableBitmap mutableBitmap = bitmapFactory.makeEmptyMutableBitmap();
              mutableBitmap.add((i - START_INT) / step);
              return bitmapFactory.makeImmutableBitmap(mutableBitmap);
            }
          }),
          serdeFactory.getObjectStrategy()
      ),
      dictionary
  ).get();
  selector = new BitmapIndexSelector() {
    @Override
    public Indexed<String> getDimensionValues(String dimension) {
      return dictionary;
    }

    @Override
    public int getNumRows() {
      throw new UnsupportedOperationException();
    }

    @Override
    public BitmapFactory getBitmapFactory() {
      return bitmapFactory;
    }

    @Override
    public ImmutableBitmap getBitmapIndex(String dimension, String value) {
      return bitmapIndex.getBitmap(bitmapIndex.getIndex(value));
    }

    @Override
    public BitmapIndex getBitmapIndex(String dimension) {
      return bitmapIndex;
    }

    @Override
    public ImmutableRTree getSpatialIndex(String dimension) {
      throw new UnsupportedOperationException();
    }
  };
}
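The anonymous BitmapIndexSelector gives a filter just enough of a column to resolve itself: a value dictionary plus one bitmap per dictionary entry, with no real segment behind it. A minimal sketch of a benchmark method exercising it, using only the methods defined above (the method name and dimension argument are illustrative; the selector ignores the dimension name):

@Benchmark
public void lookupSingleValue(Blackhole blackhole) {
  // Resolve one dictionary value to its bitmap; a bound filter does this for every
  // dictionary entry inside its range and unions the resulting bitmaps.
  ImmutableBitmap bitmap = selector.getBitmapIndex("dummy", String.valueOf(START_INT));
  blackhole.consume(bitmap);
}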
The class SqlBenchmark, method setup:
@Setup(Level.Trial)
public void setup() throws Exception {
  tmpDir = Files.createTempDir();
  log.info("Starting benchmark setup using tmpDir[%s], rows[%,d].", tmpDir, rowsPerSegment);
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }
  final BenchmarkSchemaInfo schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get("basic");
  final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);
  final List<InputRow> rows = Lists.newArrayList();
  for (int i = 0; i < rowsPerSegment; i++) {
    final InputRow row = dataGenerator.nextRow();
    if (i % 20000 == 0) {
      log.info("%,d/%,d rows generated.", i, rowsPerSegment);
    }
    rows.add(row);
  }
  log.info("%,d/%,d rows generated.", rows.size(), rowsPerSegment);
  final PlannerConfig plannerConfig = new PlannerConfig();
  final QueryRunnerFactoryConglomerate conglomerate = CalciteTests.queryRunnerFactoryConglomerate();
  final QueryableIndex index = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "1"))
      .indexMerger(TestHelper.getTestIndexMergerV9())
      .rows(rows)
      .buildMMappedIndex();
  this.walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
      DataSegment.builder()
          .dataSource("foo")
          .interval(index.getDataInterval())
          .version("1")
          .shardSpec(new LinearShardSpec(0))
          .build(),
      index
  );
  final Map<String, Table> tableMap = ImmutableMap.<String, Table>of(
      "foo",
      new DruidTable(
          new TableDataSource("foo"),
          RowSignature.builder()
              .add("__time", ValueType.LONG)
              .add("dimSequential", ValueType.STRING)
              .add("dimZipf", ValueType.STRING)
              .add("dimUniform", ValueType.STRING)
              .build()
      )
  );
  final Schema druidSchema = new AbstractSchema() {
    @Override
    protected Map<String, Table> getTableMap() {
      return tableMap;
    }
  };
  plannerFactory = new PlannerFactory(Calcites.createRootSchema(druidSchema), walker, CalciteTests.createOperatorTable(), plannerConfig);
  groupByQuery = GroupByQuery.builder()
      .setDataSource("foo")
      .setInterval(new Interval(JodaUtils.MIN_INSTANT, JodaUtils.MAX_INSTANT))
      .setDimensions(Arrays.<DimensionSpec>asList(new DefaultDimensionSpec("dimZipf", "d0"), new DefaultDimensionSpec("dimSequential", "d1")))
      .setAggregatorSpecs(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("c")))
      .setGranularity(Granularities.ALL)
      .build();
  sqlQuery = "SELECT\n"
      + "  dimZipf AS d0,\n"
      + "  dimSequential AS d1,\n"
      + "  COUNT(*) AS c\n"
      + "FROM druid.foo\n"
      + "GROUP BY dimZipf, dimSequential";
}
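The trial-level setup leaves behind both a native groupByQuery and an equivalent sqlQuery, so the benchmark can compare the two paths. A hedged sketch of the SQL side, assuming the planner API of the same Druid version (the DruidPlanner and PlannerResult names and the createPlanner argument are assumptions, not confirmed by the snippet above):

@Benchmark
public void queryPlanner(Blackhole blackhole) throws Exception {
  try (final DruidPlanner planner = plannerFactory.createPlanner(null)) {
    // Plan the SQL string prepared in setup() and run it against the segment walker.
    final PlannerResult plannerResult = planner.plan(sqlQuery);
    blackhole.consume(Sequences.toList(plannerResult.run(), Lists.<Object[]>newArrayList()));
  }
}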