Use of org.apache.commons.math3.distribution.WeibullDistribution in project cassandra by apache.
From the class LongSharedExecutorPoolTest, method testPromptnessOfExecution.
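Before the method itself, here is a minimal standalone sketch of the commons-math3 API it relies on; the shape/scale values mirror the test's workTime distribution, and the class name is illustrative only:

import org.apache.commons.math3.distribution.WeibullDistribution;

public class WeibullSampleSketch {
    public static void main(String[] args) {
        // WeibullDistribution(shape, scale): with shape > 1 the mass concentrates near the scale,
        // with shape < 1 it skews heavily toward small values
        WeibullDistribution workTime = new WeibullDistribution(3, 200000);
        for (int i = 0; i < 5; i++)
            System.out.println((long) workTime.sample()); // each sample() draws an independent value
    }
}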
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement) throws InterruptedException, ExecutionException {
final int executorCount = 4;
int threadCount = 8;
int maxQueued = 1024;
final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);
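// per-task service times are drawn from workTime and clamped to [minWorkTime, maxWorkTime] when batches are built below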
final int[] threadCounts = new int[executorCount];
final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
final ExecutorService[] executors = new ExecutorService[executorCount];
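// each successive executor gets double the threads and double the queue bound of the previous one,
// so the pool is exercised across a range of concurrency and backlog limits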
for (int i = 0; i < executors.length; i++) {
executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
threadCounts[i] = threadCount;
workCount[i] = new WeibullDistribution(2, maxQueued);
threadCount *= 2;
maxQueued *= 2;
}
long runs = 0;
long events = 0;
final TreeSet<Batch> pending = new TreeSet<>();
final BitSet executorsWithWork = new BitSet(executorCount);
long until = 0;
// the basic idea is to cycle through different levels of load on the executors: initially all small batches
// (mostly within the max queue size) of very short operations, moving to progressively larger batches
// (beyond max queued size), and longer operations
for (float multiplier = 0f; multiplier < 2.01f; ) {
if (System.nanoTime() > until) {
System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f, events * 0.000001f));
events = 0;
until = System.nanoTime() + intervalNanos;
multiplier += loadIncrement;
System.out.println(String.format("Running for %ds with load multiplier %.1f", TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
}
// wait a random amount of time so we submit new tasks while earlier batches are in various stages of completion
long timeout;
if (pending.isEmpty())
timeout = 0;
else if (Math.random() > 0.98)
timeout = Long.MAX_VALUE;
else if (pending.size() == executorCount)
timeout = pending.first().timeout;
else
timeout = (long) (Math.random() * pending.last().timeout);
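// drain any batches that finish before the chosen timeout; a batch that is still incomplete past its own deadline fails the test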
while (!pending.isEmpty() && timeout > System.nanoTime()) {
Batch first = pending.first();
boolean complete = false;
try {
for (Result result : first.results.descendingSet()) result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
complete = true;
} catch (TimeoutException e) {
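// timed out waiting on this batch; fall through and check its deadline below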
}
if (!complete && System.nanoTime() > first.timeout) {
for (Result result : first.results) if (!result.future.isDone())
throw new AssertionError();
complete = true;
}
if (complete) {
pending.pollFirst();
executorsWithWork.clear(first.executorIndex);
}
}
// if we've emptied the executors, give all our threads an opportunity to spin down
if (timeout == Long.MAX_VALUE)
Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
// submit a random batch to the first free executor service
int executorIndex = executorsWithWork.nextClearBit(0);
if (executorIndex >= executorCount)
continue;
executorsWithWork.set(executorIndex);
ExecutorService executor = executors[executorIndex];
TreeSet<Result> results = new TreeSet<>();
int count = (int) (workCount[executorIndex].sample() * multiplier);
long targetTotalElapsed = 0;
long start = System.nanoTime();
long baseTime;
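// with 50% probability every task in this batch derives its duration from a shared base time;
// otherwise each task samples its own duration from workTime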
if (Math.random() > 0.5)
baseTime = 2 * (long) (workTime.sample() * multiplier);
else
baseTime = 0;
for (int j = 0; j < count; j++) {
long time;
if (baseTime == 0)
time = (long) (workTime.sample() * multiplier);
else
time = (long) (baseTime * Math.random());
if (time < minWorkTime)
time = minWorkTime;
if (time > maxWorkTime)
time = maxWorkTime;
targetTotalElapsed += time;
Future<?> future = executor.submit(new WaitTask(time));
results.add(new Result(future, System.nanoTime() + time));
}
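// permitted completion time: the ideal elapsed time if the work were spread evenly across the executor's threads, plus a 100ms grace period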
long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex]) + TimeUnit.MILLISECONDS.toNanos(100L);
long now = System.nanoTime();
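// sanity check (skipping the first executorCount batches as warm-up): if merely submitting the batch
// took longer than its permitted completion window, the pool is not keeping up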
if (runs++ > executorCount && now > end)
throw new AssertionError();
events += results.size();
pending.add(new Batch(results, end, executorIndex));
// System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
}
}
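The method above references three small helper types (WaitTask, Result, Batch) defined elsewhere in LongSharedExecutorPoolTest. The sketch below is inferred purely from how they are used in this method and may differ from the project's actual definitions:

import java.util.TreeSet;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.LockSupport;

final class WaitTask implements Runnable {
    final long nanos;
    WaitTask(long nanos) { this.nanos = nanos; }
    public void run() { LockSupport.parkNanos(nanos); } // simulate work by parking for the requested duration
}

final class Result implements Comparable<Result> {
    private static final AtomicLong SEQ = new AtomicLong();
    final Future<?> future;
    final long expectedCompletion; // nanoTime by which this task should have finished
    private final long seq = SEQ.incrementAndGet(); // tie-breaker so a TreeSet never collapses distinct results
    Result(Future<?> future, long expectedCompletion) {
        this.future = future;
        this.expectedCompletion = expectedCompletion;
    }
    public int compareTo(Result that) {
        int c = Long.compare(this.expectedCompletion, that.expectedCompletion);
        return c != 0 ? c : Long.compare(this.seq, that.seq);
    }
}

final class Batch implements Comparable<Batch> {
    final TreeSet<Result> results;
    final long timeout; // deadline (nanoTime) by which the whole batch must have completed
    final int executorIndex;
    Batch(TreeSet<Result> results, long timeout, int executorIndex) {
        this.results = results;
        this.timeout = timeout;
        this.executorIndex = executorIndex;
    }
    public int compareTo(Batch that) {
        int c = Long.compare(this.timeout, that.timeout);
        // at most one pending batch exists per executor, so executorIndex breaks ties deterministically
        return c != 0 ? c : Integer.compare(this.executorIndex, that.executorIndex);
    }
}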