Use of com.carrotsearch.junitbenchmarks.BenchmarkOptions in project druid by druid-io.
The class OnheapIncrementalIndexBenchmark, method testConcurrentAddRead:
@Ignore
@Test
@BenchmarkOptions(callgc = true, clock = Clock.REAL_TIME, warmupRounds = 10, benchmarkRounds = 20)
public void testConcurrentAddRead()
    throws InterruptedException, ExecutionException, NoSuchMethodException,
    IllegalAccessException, InvocationTargetException, InstantiationException
{
  final int taskCount = 30;
  final int concurrentThreads = 3;
  final int elementsPerThread = 1 << 15;
  final OnheapIncrementalIndex incrementalIndex = this.incrementalIndex
      .getConstructor(Long.TYPE, Granularity.class, AggregatorFactory[].class, Integer.TYPE)
      .newInstance(0, Granularities.NONE, factories, elementsPerThread * taskCount);
  final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>(dimensionCount + 1);
  queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
  for (int i = 0; i < dimensionCount; ++i) {
    queryAggregatorFactories.add(
        new LongSumAggregatorFactory(String.format("sumResult%s", i), String.format("sumResult%s", i))
    );
    queryAggregatorFactories.add(
        new DoubleSumAggregatorFactory(String.format("doubleSumResult%s", i), String.format("doubleSumResult%s", i))
    );
  }
  final ListeningExecutorService indexExecutor = MoreExecutors.listeningDecorator(
      Executors.newFixedThreadPool(
          concurrentThreads,
          new ThreadFactoryBuilder()
              .setDaemon(false)
              .setNameFormat("index-executor-%d")
              .setPriority(Thread.MIN_PRIORITY)
              .build()
      )
  );
  final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(
      Executors.newFixedThreadPool(
          concurrentThreads,
          new ThreadFactoryBuilder().setDaemon(false).setNameFormat("query-executor-%d").build()
      )
  );
  final long timestamp = System.currentTimeMillis();
  final Interval queryInterval = new Interval("1900-01-01T00:00:00Z/2900-01-01T00:00:00Z");
  final List<ListenableFuture<?>> indexFutures = new LinkedList<>();
  final List<ListenableFuture<?>> queryFutures = new LinkedList<>();
  final Segment incrementalIndexSegment = new IncrementalIndexSegment(incrementalIndex, null);
  final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
      new TimeseriesQueryEngine(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER
  );
  final AtomicInteger currentlyRunning = new AtomicInteger(0);
  final AtomicBoolean concurrentlyRan = new AtomicBoolean(false);
  final AtomicBoolean someoneRan = new AtomicBoolean(false);
  for (int j = 0; j < taskCount; j++) {
    indexFutures.add(indexExecutor.submit(new Runnable() {
      @Override
      public void run() {
        currentlyRunning.incrementAndGet();
        try {
          for (int i = 0; i < elementsPerThread; i++) {
            incrementalIndex.add(getLongRow(timestamp + i, 1, dimensionCount));
          }
        } catch (IndexSizeExceededException e) {
          throw Throwables.propagate(e);
        }
        currentlyRunning.decrementAndGet();
        someoneRan.set(true);
      }
    }));
    queryFutures.add(queryExecutor.submit(new Runnable() {
      @Override
      public void run() {
        QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(
            factory.createRunner(incrementalIndexSegment),
            factory.getToolchest()
        );
        TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
            .dataSource("xxx")
            .granularity(Granularities.ALL)
            .intervals(ImmutableList.of(queryInterval))
            .aggregators(queryAggregatorFactories)
            .build();
        Map<String, Object> context = new HashMap<String, Object>();
        for (Result<TimeseriesResultValue> result :
            Sequences.toList(runner.run(query, context), new LinkedList<Result<TimeseriesResultValue>>())) {
          if (someoneRan.get()) {
            Assert.assertTrue(result.getValue().getDoubleMetric("doubleSumResult0") > 0);
          }
        }
        if (currentlyRunning.get() > 0) {
          concurrentlyRan.set(true);
        }
      }
    }));
  }
  List<ListenableFuture<?>> allFutures = new ArrayList<>(queryFutures.size() + indexFutures.size());
  allFutures.addAll(queryFutures);
  allFutures.addAll(indexFutures);
  Futures.allAsList(allFutures).get();
  //Assert.assertTrue("Did not hit concurrency, please try again", concurrentlyRan.get());
  queryExecutor.shutdown();
  indexExecutor.shutdown();
  QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(
      factory.createRunner(incrementalIndexSegment),
      factory.getToolchest()
  );
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("xxx")
      .granularity(Granularities.ALL)
      .intervals(ImmutableList.of(queryInterval))
      .aggregators(queryAggregatorFactories)
      .build();
  Map<String, Object> context = new HashMap<String, Object>();
  List<Result<TimeseriesResultValue>> results = Sequences.toList(
      runner.run(query, context),
      new LinkedList<Result<TimeseriesResultValue>>()
  );
  final int expectedVal = elementsPerThread * taskCount;
  for (Result<TimeseriesResultValue> result : results) {
    Assert.assertEquals(elementsPerThread, result.getValue().getLongMetric("rows").intValue());
    for (int i = 0; i < dimensionCount; ++i) {
      Assert.assertEquals(
          String.format("Failed long sum on dimension %d", i),
          expectedVal,
          result.getValue().getLongMetric(String.format("sumResult%s", i)).intValue()
      );
      Assert.assertEquals(
          String.format("Failed double sum on dimension %d", i),
          expectedVal,
          result.getValue().getDoubleMetric(String.format("doubleSumResult%s", i)).intValue()
      );
    }
  }
}
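Note that @BenchmarkOptions only takes effect when the test class is hooked into the junit-benchmarks harness, either by extending com.carrotsearch.junitbenchmarks.AbstractBenchmark or by declaring a BenchmarkRule as a JUnit @Rule; the excerpts on this page omit that class-level setup. A minimal, illustrative sketch of the setup follows (the class name SimpleBenchmarkExample and the measured body are made up, not taken from the Druid source):

import com.carrotsearch.junitbenchmarks.AbstractBenchmark;
import com.carrotsearch.junitbenchmarks.BenchmarkOptions;
import org.junit.Test;

// Hypothetical benchmark class: extending AbstractBenchmark installs the BenchmarkRule
// that repeats each @Test for the configured warmup and benchmark rounds.
public class SimpleBenchmarkExample extends AbstractBenchmark {

  @BenchmarkOptions(warmupRounds = 5, benchmarkRounds = 20)
  @Test
  public void timeStringBuilderAppend() {
    // Measured body; warmup rounds run first and are excluded from the reported statistics.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 10000; i++) {
      sb.append(i);
    }
  }
}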
Use of com.carrotsearch.junitbenchmarks.BenchmarkOptions in project druid by druid-io.
The class CostBalancerStrategyBenchmark, method testBenchmark:
@Test
@BenchmarkOptions(warmupRounds = 10, benchmarkRounds = 1000)
public void testBenchmark() throws InterruptedException {
  DataSegment segment = CostBalancerStrategyTest.getSegment(1000, "testds", interval1);
  selected = strategy.findNewSegmentHomeReplicator(segment, serverHolderList);
}
Use of com.carrotsearch.junitbenchmarks.BenchmarkOptions in project parquet-mr by apache.
The class BenchmarkDeltaByteArray, method benchmarkSortedStringsWithDeltaLengthByteArrayValuesWriter:
@BenchmarkOptions(benchmarkRounds = 20, warmupRounds = 4)
@Test
public void benchmarkSortedStringsWithDeltaLengthByteArrayValuesWriter() throws IOException {
  DeltaByteArrayWriter writer = new DeltaByteArrayWriter(64 * 1024, 64 * 1024, new DirectByteBufferAllocator());
  DeltaByteArrayReader reader = new DeltaByteArrayReader();
  Utils.writeData(writer, sortedVals);
  ByteBufferInputStream data = writer.getBytes().toInputStream();
  Binary[] bin = Utils.readData(reader, data, values.length);
  System.out.println("size " + data.position());
}
Use of com.carrotsearch.junitbenchmarks.BenchmarkOptions in project parquet-mr by apache.
The class BenchmarkDeltaByteArray, method benchmarkRandomStringsWithPlainValuesWriter:
@BenchmarkOptions(benchmarkRounds = 20, warmupRounds = 4)
@Test
public void benchmarkRandomStringsWithPlainValuesWriter() throws IOException {
  PlainValuesWriter writer = new PlainValuesWriter(64 * 1024, 64 * 1024, new DirectByteBufferAllocator());
  BinaryPlainValuesReader reader = new BinaryPlainValuesReader();
  Utils.writeData(writer, values);
  ByteBufferInputStream data = writer.getBytes().toInputStream();
  Binary[] bin = Utils.readData(reader, data, values.length);
  System.out.println("size " + data.position());
}
Use of com.carrotsearch.junitbenchmarks.BenchmarkOptions in project parquet-mr by apache.
The class RandomWritingBenchmarkTest, method writeDeltaPackingTest2:
@BenchmarkOptions(benchmarkRounds = 10, warmupRounds = 2)
@Test
public void writeDeltaPackingTest2() {
  DeltaBinaryPackingValuesWriter writer = new DeltaBinaryPackingValuesWriterForInteger(blockSize, miniBlockNum, 100, 20000, new DirectByteBufferAllocator());
  runWriteTest(writer);
}