Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io: the setup method of the FilteredAggregatorBenchmark class.
@Setup
public void setup() throws IOException
{
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  // Register the HyperUnique serde once so complex-metric columns can be built.
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }

  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
  BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
      schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment
  );
  incIndex = makeIncIndex(schemaInfo.getAggsArray());

  // A disjunction of filters that never match, so filter evaluation cost dominates.
  filter = new OrDimFilter(Arrays.asList(
      new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
      new JavaScriptDimFilter("dimSequential", "function(x) { return false }", null, JavaScriptConfig.getEnabledInstance()),
      new RegexDimFilter("dimSequential", "X", null),
      new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
      new InDimFilter("dimSequential", Arrays.asList("X"), null)
  ));
  filteredMetrics = new AggregatorFactory[1];
  filteredMetrics[0] = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
  incIndexFilteredAgg = makeIncIndex(filteredMetrics);

  inputRows = new ArrayList<>();
  for (int j = 0; j < rowsPerSegment; j++) {
    InputRow row = gen.nextRow();
    if (j % 10000 == 0) {
      log.info(j + " rows generated.");
    }
    incIndex.add(row);
    inputRows.add(row);
  }

  // Persist the incremental index and reload it as a queryable segment.
  tmpDir = Files.createTempDir();
  log.info("Using temp dir: " + tmpDir.getAbsolutePath());
  indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
  qIndex = INDEX_IO.loadIndex(indexFile);

  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );

  BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
  List<AggregatorFactory> queryAggs = new ArrayList<>();
  queryAggs.add(filteredMetrics[0]);
  query = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false)
                .build();
}
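For orientation, here is a minimal sketch (not the project's actual benchmark method) of how JMH consumes this state: the fields initialized in setup() are reused across invocations because the enclosing class is a JMH @State object. The runQuery helper is hypothetical, standing in for the query-execution plumbing the benchmark class provides.

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryFilteredIndex(Blackhole blackhole)
{
  // qIndex and query were built once in setup(), so only query execution is timed.
  List<Result<TimeseriesResultValue>> results = runQuery(factory, qIndex, query); // hypothetical helper
  blackhole.consume(results);
}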
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io: the setup method of the LongCompressionBenchmark class.
@Setup
public void setup() throws Exception
{
  File dir = new File(dirPath);
  File compFile = new File(dir, file + "-" + strategy + "-" + format);
  rand = new Random();
  // Memory-map the pre-generated compressed column and wrap it in a supplier.
  ByteBuffer buffer = Files.map(compFile);
  supplier = CompressedLongsIndexedSupplier.fromByteBuffer(buffer, ByteOrder.nativeOrder(), null);
}
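A rough sketch of a read benchmark over this state, assuming supplier.get() yields an indexed view with size() and get(int) (the shape of Druid's IndexedLongs in the versions this benchmark targets):

@Benchmark
public void readContinuous(Blackhole bh)
{
  IndexedLongs indexed = supplier.get();
  long sum = 0;
  // Decode every value sequentially so the compression strategy's read path dominates.
  for (int i = 0; i < indexed.size(); i++) {
    sum += indexed.get(i);
  }
  bh.consume(sum);
}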
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io: the setup method of the TimeParseBenchmark class.
@Setup
public void setup()
{
  SimpleDateFormat format = new SimpleDateFormat(DATA_FORMAT);
  long start = System.currentTimeMillis();
  int rowsPerBatch = numRows / numBatches;
  int numRowInBatch = 0;
  rows = new String[numRows];
  for (int i = 0; i < numRows; ++i) {
    if (numRowInBatch >= rowsPerBatch) {
      numRowInBatch = 0;
      // new batch, add 5 seconds
      start += 5000;
    }
    rows[i] = format.format(new Date(start));
    numRowInBatch++;
  }
}
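Because every batch repeats the same formatted timestamp, a parser that caches its last input pays the full parsing cost only once per batch, which is what this layout is designed to expose. A minimal sketch of a benchmark over the generated rows (assumed, not the project's exact method):

@Benchmark
public void parseWithSimpleDateFormat(Blackhole blackhole) throws ParseException
{
  SimpleDateFormat parser = new SimpleDateFormat(DATA_FORMAT);
  for (String row : rows) {
    // Consume the epoch millis so the JIT cannot elide the parse.
    blackhole.consume(parser.parse(row).getTime());
  }
}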
Use of org.openjdk.jmh.annotations.Setup in project druid by druid-io: the setup method of the FloatCompressionBenchmark class.
@Setup
public void setup() throws Exception
{
  File dir = new File(dirPath);
  File compFile = new File(dir, file + "-" + strategy);
  rand = new Random();
  // Memory-map the pre-generated compressed column and wrap it in a supplier.
  ByteBuffer buffer = Files.map(compFile);
  supplier = CompressedFloatsIndexedSupplier.fromByteBuffer(buffer, ByteOrder.nativeOrder(), null);
}
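The float setup mirrors the long one above; under the same assumption that supplier.get() returns an indexed view with size() and get(int) (Druid's IndexedFloats of that era), a read benchmark could look like:

@Benchmark
public void readContinuous(Blackhole bh)
{
  IndexedFloats indexed = supplier.get();
  float sum = 0f;
  for (int i = 0; i < indexed.size(); i++) {
    sum += indexed.get(i);
  }
  bh.consume(sum);
}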
Use of org.openjdk.jmh.annotations.Setup in project gradle by gradle: the prepare method of the FileMetadataAccessorBenchmark class.
@Setup
public void prepare() throws IOException
{
  accessor = getAccessor(accessorClassName);
  missing = new File(UUID.randomUUID().toString());
  // createTempFile creates a regular file; delete it first so mkdirs() can
  // recreate the path as a directory (mkdirs() fails if a file already exists there).
  directory = File.createTempFile("jmh", "dir");
  directory.delete();
  directory.mkdirs();
  realFile = File.createTempFile("jmh", "tmp");
  try (FileOutputStream fos = new FileOutputStream(realFile)) {
    fos.write(new byte[1024]);
  }
}
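A sketch of benchmark methods over this state, assuming the accessor exposes a stat(File) call (the shape of Gradle's FileMetadataAccessor interface); the method names are illustrative:

@Benchmark
public void statExistingFile(Blackhole bh)
{
  bh.consume(accessor.stat(realFile)); // the 1 KiB file created in prepare()
}

@Benchmark
public void statMissingFile(Blackhole bh)
{
  bh.consume(accessor.stat(missing)); // a random UUID path that never exists
}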