Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io.
Class GroupByTypeInterfaceBenchmark, method setup().
@Setup(Level.Trial)
public void setup() throws IOException
{
  log.info("SETUP CALLED AT %d", System.currentTimeMillis());

  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }

  executorService = Execs.multiThreaded(numProcessingThreads, "GroupByThreadPool[%d]");

  setupQueries();

  String schemaName = "basic";
  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
  stringQuery = SCHEMA_QUERY_MAP.get(schemaName).get("string");
  longFloatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("longFloat");
  longQuery = SCHEMA_QUERY_MAP.get(schemaName).get("long");
  floatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("float");

  final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(
      schemaInfo.getColumnSchemas(), RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment
  );

  tmpDir = Files.createTempDir();
  log.info("Using temp dir: %s", tmpDir.getAbsolutePath());

  // queryableIndexes -> numSegments worth of on-disk segments
  // anIncrementalIndex -> the last incremental index
  anIncrementalIndex = null;
  queryableIndexes = new ArrayList<>(numSegments);

  for (int i = 0; i < numSegments; i++) {
    log.info("Generating rows for segment %d/%d", i + 1, numSegments);
    final IncrementalIndex index = makeIncIndex();

    for (int j = 0; j < rowsPerSegment; j++) {
      final InputRow row = dataGenerator.nextRow();
      if (j % 20000 == 0) {
        log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
      }
      index.add(row);
    }

    log.info(
        "%,d/%,d rows generated, persisting segment %d/%d.",
        (i + 1) * rowsPerSegment, rowsPerSegment * numSegments, i + 1, numSegments
    );
    final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec());
    queryableIndexes.add(INDEX_IO.loadIndex(file));

    if (i == numSegments - 1) {
      anIncrementalIndex = index;
    } else {
      index.close();
    }
  }

  StupidPool<ByteBuffer> bufferPool = new StupidPool<>(
      "GroupByBenchmark-computeBufferPool",
      new OffheapBufferGenerator("compute", 250_000_000),
      0,
      Integer.MAX_VALUE
  );

  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  BlockingPool<ByteBuffer> mergePool = new BlockingPool<>(new OffheapBufferGenerator("merge", 250_000_000), 2);

  final GroupByQueryConfig config = new GroupByQueryConfig()
  {
    @Override
    public String getDefaultStrategy()
    {
      return defaultStrategy;
    }

    @Override
    public int getBufferGrouperInitialBuckets()
    {
      return initialBuckets;
    }

    @Override
    public long getMaxOnDiskStorage()
    {
      return 1_000_000_000L;
    }
  };
  config.setSingleThreaded(false);
  config.setMaxIntermediateRows(Integer.MAX_VALUE);
  config.setMaxResults(Integer.MAX_VALUE);

  DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig()
  {
    @Override
    public int getNumThreads()
    {
      // Used by "v2" strategy for concurrencyHint
      return numProcessingThreads;
    }

    @Override
    public String getFormatString()
    {
      return null;
    }
  };

  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
  final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(
          configSupplier,
          new GroupByQueryEngine(configSupplier, bufferPool),
          QueryBenchmarkUtil.NOOP_QUERYWATCHER,
          bufferPool
      ),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool,
          new ObjectMapper(new SmileFactory()),
          QueryBenchmarkUtil.NOOP_QUERYWATCHER
      )
  );

  factory = new GroupByQueryRunnerFactory(
      strategySelector,
      new GroupByQueryQueryToolChest(strategySelector, QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator())
  );
}
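Setup like this allocates off-heap buffers, on-disk segments under a temp dir, and a thread pool, so a matching teardown is its natural counterpart. The following is only a minimal sketch, not the project's actual code: it assumes the field names and types from the setup above and that commons-io's FileUtils is available on the classpath.

@TearDown(Level.Trial)
public void tearDown() throws IOException
{
  // Hypothetical cleanup mirroring the resources created in setup(); field names and types are assumed from above.
  executorService.shutdownNow();
  if (anIncrementalIndex != null) {
    anIncrementalIndex.close();
  }
  for (QueryableIndex index : queryableIndexes) {
    index.close();
  }
  // org.apache.commons.io.FileUtils, assumed to be available
  FileUtils.deleteDirectory(tmpDir);
}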
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io.
Class CompressedIndexedIntsBenchmark, method setup().
@Setup
public void setup() throws IOException
{
  Random rand = new Random(0);
  int[] vals = new int[0x100000];
  final int bound = 1 << bytes;
  for (int i = 0; i < vals.length; ++i) {
    vals[i] = rand.nextInt(bound);
  }

  final ByteBuffer bufferCompressed = serialize(
      CompressedVSizeIntsIndexedSupplier.fromList(
          Ints.asList(vals),
          bound - 1,
          CompressedVSizeIntsIndexedSupplier.maxIntsInBufferForBytes(bytes),
          ByteOrder.nativeOrder(),
          CompressedObjectStrategy.CompressionStrategy.LZ4
      )
  );
  this.compressed = CompressedVSizeIntsIndexedSupplier.fromByteBuffer(bufferCompressed, ByteOrder.nativeOrder(), null).get();

  final ByteBuffer bufferUncompressed = serialize(new VSizeIndexedInts.VSizeIndexedIntsSupplier(VSizeIndexedInts.fromArray(vals)));
  this.uncompressed = VSizeIndexedInts.readFromByteBuffer(bufferUncompressed);

  filter = new BitSet();
  for (int i = 0; i < filteredRowCount; i++) {
    int rowToAccess = rand.nextInt(vals.length);
    // Skip already selected rows if any
    while (filter.get(rowToAccess)) {
      rowToAccess = (rowToAccess + 1) % vals.length;
    }
    filter.set(rowToAccess);
  }
}
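A measured method would then walk the pre-selected rows and read them back through one of the two views. The sketch below illustrates that pattern only; it assumes the compressed and uncompressed fields are IndexedInts exposing a positional get(int), and it uses org.openjdk.jmh.infra.Blackhole to keep the reads alive.

@Benchmark
public void readCompressedFiltered(Blackhole blackhole)
{
  // Sketch only: visit the rows chosen in setup() via the BitSet and read them from the compressed view.
  long sum = 0;
  for (int i = filter.nextSetBit(0); i >= 0; i = filter.nextSetBit(i + 1)) {
    sum += compressed.get(i);
  }
  blackhole.consume(sum);
}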
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io.
Class CompressedVSizeIndexedBenchmark, method setup().
@Setup
public void setup() throws IOException
{
  Random rand = new Random(0);
  List<int[]> rows = Lists.newArrayList();
  final int bound = 1 << bytes;
  for (int i = 0; i < 0x100000; i++) {
    int count = rand.nextInt(valuesPerRowBound) + 1;
    int[] row = new int[rand.nextInt(count)];
    for (int j = 0; j < row.length; j++) {
      row[j] = rand.nextInt(bound);
    }
    rows.add(row);
  }

  final ByteBuffer bufferCompressed = serialize(
      CompressedVSizeIndexedSupplier.fromIterable(
          Iterables.transform(
              rows,
              new Function<int[], IndexedInts>()
              {
                @Override
                public IndexedInts apply(int[] input)
                {
                  return VSizeIndexedInts.fromArray(input, 20);
                }
              }
          ),
          bound - 1,
          ByteOrder.nativeOrder(),
          CompressedObjectStrategy.CompressionStrategy.LZ4
      )
  );
  this.compressed = CompressedVSizeIndexedSupplier.fromByteBuffer(bufferCompressed, ByteOrder.nativeOrder(), null).get();

  final ByteBuffer bufferUncompressed = serialize(
      VSizeIndexed.fromIterable(
          Iterables.transform(
              rows,
              new Function<int[], VSizeIndexedInts>()
              {
                @Override
                public VSizeIndexedInts apply(int[] input)
                {
                  return VSizeIndexedInts.fromArray(input, 20);
                }
              }
          )
      ).asWritableSupplier()
  );
  this.uncompressed = VSizeIndexed.readFromByteBuffer(bufferUncompressed);

  filter = new BitSet();
  for (int i = 0; i < filteredRowCount; i++) {
    int rowToAccess = rand.nextInt(rows.size());
    // Skip already selected rows if any
    while (filter.get(rowToAccess)) {
      rowToAccess = (rowToAccess + 1) % rows.size();
    }
    filter.set(rowToAccess);
  }
}
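As in the single-value case, a benchmark method would read the selected multi-value rows back. This sketch assumes the compressed field behaves as an indexed collection of IndexedInts rows (size() plus positional get(int)), which is what the suppliers built above produce:

@Benchmark
public void readCompressedFiltered(Blackhole blackhole)
{
  // Sketch only: for each filtered row, read every value of the multi-value column.
  long sum = 0;
  for (int i = filter.nextSetBit(0); i >= 0; i = filter.nextSetBit(i + 1)) {
    final IndexedInts row = compressed.get(i);
    for (int j = 0; j < row.size(); j++) {
      sum += row.get(j);
    }
  }
  blackhole.consume(sum);
}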
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io.
Class VSizeSerdeBenchmark, method setup().
@Setup
public void setup() throws IOException, URISyntaxException
{
  // This uses a dummy file of sufficient size to construct a mapped ByteBuffer, rather than ByteBuffer.allocate
  // and a heap ByteBuffer, since the two have different performance characteristics.
  File base = new File(this.getClass().getClassLoader().getResource("").toURI());
  dummy = new File(base, "dummy");
  try (Writer writer = new BufferedWriter(new FileWriter(dummy))) {
    String EMPTY_STRING = " ";
    for (int i = 0; i < values + 10; i++) {
      writer.write(EMPTY_STRING);
    }
  }

  ByteBuffer buffer = Files.map(dummy);
  d1 = VSizeLongSerde.getDeserializer(1, buffer, 10);
  d2 = VSizeLongSerde.getDeserializer(2, buffer, 10);
  d4 = VSizeLongSerde.getDeserializer(4, buffer, 10);
  d8 = VSizeLongSerde.getDeserializer(8, buffer, 10);
  d12 = VSizeLongSerde.getDeserializer(12, buffer, 10);
  d16 = VSizeLongSerde.getDeserializer(16, buffer, 10);
  d20 = VSizeLongSerde.getDeserializer(20, buffer, 10);
  d24 = VSizeLongSerde.getDeserializer(24, buffer, 10);
  d32 = VSizeLongSerde.getDeserializer(32, buffer, 10);
  d40 = VSizeLongSerde.getDeserializer(40, buffer, 10);
  d48 = VSizeLongSerde.getDeserializer(48, buffer, 10);
  d56 = VSizeLongSerde.getDeserializer(56, buffer, 10);
  d64 = VSizeLongSerde.getDeserializer(64, buffer, 10);
}
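The deserializers prepared here are then exercised by decoding values positionally, and the dummy file should be removed once the trial ends. Both methods below are sketches only: they assume VSizeLongSerde.LongDeserializer exposes get(int) returning a long, and that values is the element count used above.

@Benchmark
public void readOneBitPerValue(Blackhole blackhole)
{
  // Sketch only: sequentially decode `values` entries through the 1-bit deserializer.
  long sum = 0;
  for (int i = 0; i < values; i++) {
    sum += d1.get(i);
  }
  blackhole.consume(sum);
}

@TearDown
public void tearDown()
{
  // Remove the temporary backing file created in setup().
  dummy.delete();
}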
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io.
Class FilterPartitionBenchmark, method setup().
@Setup
public void setup() throws IOException
{
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }

  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
  BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
      schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment
  );

  incIndex = makeIncIndex();
  for (int j = 0; j < rowsPerSegment; j++) {
    InputRow row = gen.nextRow();
    if (j % 10000 == 0) {
      log.info(j + " rows generated.");
    }
    incIndex.add(row);
  }

  tmpDir = Files.createTempDir();
  log.info("Using temp dir: " + tmpDir.getAbsolutePath());

  indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
  qIndex = INDEX_IO.loadIndex(indexFile);

  Interval interval = schemaInfo.getDataInterval();
  timeFilterNone = new BoundFilter(new BoundDimFilter(
      Column.TIME_COLUMN_NAME,
      String.valueOf(Long.MAX_VALUE), String.valueOf(Long.MAX_VALUE),
      true, true, null, null,
      StringComparators.ALPHANUMERIC
  ));

  long halfEnd = (interval.getEndMillis() + interval.getStartMillis()) / 2;
  timeFilterHalf = new BoundFilter(new BoundDimFilter(
      Column.TIME_COLUMN_NAME,
      String.valueOf(interval.getStartMillis()), String.valueOf(halfEnd),
      true, true, null, null,
      StringComparators.ALPHANUMERIC
  ));

  timeFilterAll = new BoundFilter(new BoundDimFilter(
      Column.TIME_COLUMN_NAME,
      String.valueOf(interval.getStartMillis()), String.valueOf(interval.getEndMillis()),
      true, true, null, null,
      StringComparators.ALPHANUMERIC
  ));
}
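All of the @Setup methods above run once per trial (Level.Trial is the default when no level is given), before any measured iteration. A minimal JMH entry point for launching one of these benchmark classes from the command line could look like the sketch below; the iteration and fork counts are illustrative, not taken from the project.

// Uses org.openjdk.jmh.runner.Runner, org.openjdk.jmh.runner.RunnerException,
// and org.openjdk.jmh.runner.options.{Options, OptionsBuilder}.
public static void main(String[] args) throws RunnerException
{
  Options opt = new OptionsBuilder()
      .include(FilterPartitionBenchmark.class.getSimpleName())
      .warmupIterations(10)       // illustrative values
      .measurementIterations(25)
      .forks(1)
      .build();
  new Runner(opt).run();
}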