Use of io.airlift.stats.Distribution in project presto by prestodb.
From the class PipelineContext, the method getPipelineStats. It folds per-driver timing samples into two Distribution instances (queuedTime and elapsedTime) and exposes immutable snapshots of them in the returned PipelineStats:
public PipelineStats getPipelineStats() {
    // check for end state to avoid callback ordering problems
    if (taskContext.getState().isDone()) {
        DateTime now = DateTime.now();
        executionStartTime.compareAndSet(null, now);
        lastExecutionStartTime.compareAndSet(null, now);
        lastExecutionEndTime.compareAndSet(null, now);
    }

    List<DriverContext> driverContexts = ImmutableList.copyOf(this.drivers);
    int totalDrivers = completedDrivers.get() + driverContexts.size();
    int queuedDrivers = 0;
    int queuedPartitionedDrivers = 0;
    int runningDrivers = 0;
    int runningPartitionedDrivers = 0;
    int completedDrivers = this.completedDrivers.get();

    Distribution queuedTime = new Distribution(this.queuedTime);
    Distribution elapsedTime = new Distribution(this.elapsedTime);

    long totalScheduledTime = this.totalScheduledTime.get();
    long totalCpuTime = this.totalCpuTime.get();
    long totalUserTime = this.totalUserTime.get();
    long totalBlockedTime = this.totalBlockedTime.get();

    long rawInputDataSize = this.rawInputDataSize.getTotalCount();
    long rawInputPositions = this.rawInputPositions.getTotalCount();

    long processedInputDataSize = this.processedInputDataSize.getTotalCount();
    long processedInputPositions = this.processedInputPositions.getTotalCount();

    long outputDataSize = this.outputDataSize.getTotalCount();
    long outputPositions = this.outputPositions.getTotalCount();
    List<DriverStats> drivers = new ArrayList<>();
    Multimap<Integer, OperatorStats> runningOperators = ArrayListMultimap.create();
    for (DriverContext driverContext : driverContexts) {
        DriverStats driverStats = driverContext.getDriverStats();
        drivers.add(driverStats);

        if (driverStats.getStartTime() == null) {
            queuedDrivers++;
            if (driverContext.isPartitioned()) {
                queuedPartitionedDrivers++;
            }
        }
        else {
            runningDrivers++;
            if (driverContext.isPartitioned()) {
                runningPartitionedDrivers++;
            }
        }

        queuedTime.add(driverStats.getQueuedTime().roundTo(NANOSECONDS));
        elapsedTime.add(driverStats.getElapsedTime().roundTo(NANOSECONDS));

        totalScheduledTime += driverStats.getTotalScheduledTime().roundTo(NANOSECONDS);
        totalCpuTime += driverStats.getTotalCpuTime().roundTo(NANOSECONDS);
        totalUserTime += driverStats.getTotalUserTime().roundTo(NANOSECONDS);
        totalBlockedTime += driverStats.getTotalBlockedTime().roundTo(NANOSECONDS);

        List<OperatorStats> operators = ImmutableList.copyOf(transform(driverContext.getOperatorContexts(), OperatorContext::getOperatorStats));
        for (OperatorStats operator : operators) {
            runningOperators.put(operator.getOperatorId(), operator);
        }

        rawInputDataSize += driverStats.getRawInputDataSize().toBytes();
        rawInputPositions += driverStats.getRawInputPositions();

        processedInputDataSize += driverStats.getProcessedInputDataSize().toBytes();
        processedInputPositions += driverStats.getProcessedInputPositions();

        outputDataSize += driverStats.getOutputDataSize().toBytes();
        outputPositions += driverStats.getOutputPositions();
    }
    // merge the running operator stats into the operator summary
    TreeMap<Integer, OperatorStats> operatorSummaries = new TreeMap<>(this.operatorSummaries);
    for (Entry<Integer, OperatorStats> entry : runningOperators.entries()) {
        OperatorStats current = operatorSummaries.get(entry.getKey());
        if (current == null) {
            current = entry.getValue();
        }
        else {
            current = current.add(entry.getValue());
        }
        operatorSummaries.put(entry.getKey(), current);
    }
    ImmutableSet<BlockedReason> blockedReasons = drivers.stream()
            .filter(driver -> driver.getEndTime() == null && driver.getStartTime() != null)
            .flatMap(driver -> driver.getBlockedReasons().stream())
            .collect(ImmutableCollectors.toImmutableSet());
    boolean fullyBlocked = drivers.stream()
            .filter(driver -> driver.getEndTime() == null && driver.getStartTime() != null)
            .allMatch(DriverStats::isFullyBlocked);
    return new PipelineStats(
            pipelineId,
            executionStartTime.get(),
            lastExecutionStartTime.get(),
            lastExecutionEndTime.get(),
            inputPipeline,
            outputPipeline,
            totalDrivers,
            queuedDrivers,
            queuedPartitionedDrivers,
            runningDrivers,
            runningPartitionedDrivers,
            completedDrivers,
            succinctBytes(memoryReservation.get()),
            succinctBytes(systemMemoryReservation.get()),
            queuedTime.snapshot(),
            elapsedTime.snapshot(),
            new Duration(totalScheduledTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalCpuTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalUserTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            new Duration(totalBlockedTime, NANOSECONDS).convertToMostSuccinctTimeUnit(),
            fullyBlocked && (runningDrivers > 0 || runningPartitionedDrivers > 0),
            blockedReasons,
            succinctBytes(rawInputDataSize),
            rawInputPositions,
            succinctBytes(processedInputDataSize),
            processedInputPositions,
            succinctBytes(outputDataSize),
            outputPositions,
            ImmutableList.copyOf(operatorSummaries.values()),
            drivers);
}
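
The method above shows the core aggregation pattern for Distribution: seed a new Distribution from already-accumulated samples via the copy constructor, add() one sample per live driver, and publish an immutable snapshot() in the returned stats object. Below is a minimal standalone sketch of that pattern. Only the Distribution API it uses (the no-arg and copy constructors, add, snapshot) is taken from the code above; the class, methods, and field names are hypothetical, and the nested snapshot type is assumed to be Distribution.DistributionSnapshot as in this version of airlift.

import io.airlift.stats.Distribution;
import io.airlift.stats.Distribution.DistributionSnapshot;

public class QueuedTimeAggregator
{
    // samples from drivers that have already finished (hypothetical field)
    private final Distribution completedQueuedTime = new Distribution();

    // record one finished driver's queued time in nanoseconds (hypothetical method)
    public void driverCompleted(long queuedNanos)
    {
        completedQueuedTime.add(queuedNanos);
    }

    // build a point-in-time view that also covers still-running drivers (hypothetical method)
    public DistributionSnapshot currentQueuedTime(long[] runningDriverQueuedNanos)
    {
        // copy constructor: start from the completed samples without mutating them
        Distribution queuedTime = new Distribution(completedQueuedTime);
        for (long nanos : runningDriverQueuedNanos) {
            queuedTime.add(nanos);
        }
        // snapshot() returns an immutable summary suitable for reporting
        return queuedTime.snapshot();
    }
}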
Use of io.airlift.stats.Distribution in project presto by prestodb.
From the class TaskExecutorSimulator, the method run. It collects per-task wall time and parallelism into Distribution instances and prints their percentiles after the simulation:
public void run() throws Exception {
    Multimap<Integer, SimulationTask> tasks = Multimaps.synchronizedListMultimap(ArrayListMultimap.<Integer, SimulationTask>create());
    Set<ListenableFuture<?>> finishFutures = newConcurrentHashSet();
    AtomicBoolean done = new AtomicBoolean();

    long start = System.nanoTime();

    // large tasks
    for (int userId = 0; userId < 2; userId++) {
        ListenableFuture<?> future = createUser("large_" + userId, 100, taskExecutor, done, tasks);
        finishFutures.add(future);
    }

    // small tasks
    for (int userId = 0; userId < 4; userId++) {
        ListenableFuture<?> future = createUser("small_" + userId, 5, taskExecutor, done, tasks);
        finishFutures.add(future);
    }

    // tiny tasks
    for (int userId = 0; userId < 1; userId++) {
        ListenableFuture<?> future = createUser("tiny_" + userId, 1, taskExecutor, done, tasks);
        finishFutures.add(future);
    }

    // warm up
    for (int i = 0; i < 30; i++) {
        MILLISECONDS.sleep(1000);
        System.out.println(taskExecutor);
    }
    tasks.clear();

    // run
    for (int i = 0; i < 60; i++) {
        MILLISECONDS.sleep(1000);
        System.out.println(taskExecutor);
    }

    // capture finished tasks
    Map<Integer, Collection<SimulationTask>> middleTasks;
    synchronized (tasks) {
        middleTasks = new TreeMap<>(tasks.asMap());
    }

    // wait for finish
    done.set(true);
    Futures.allAsList(finishFutures).get(1, TimeUnit.MINUTES);

    Duration runtime = Duration.nanosSince(start).convertToMostSuccinctTimeUnit();
    synchronized (this) {
        System.out.println();
        System.out.println("Simulation finished in " + runtime);
        System.out.println();

        for (Entry<Integer, Collection<SimulationTask>> entry : middleTasks.entrySet()) {
            Distribution durationDistribution = new Distribution();
            Distribution taskParallelismDistribution = new Distribution();

            for (SimulationTask task : entry.getValue()) {
                long taskStart = Long.MAX_VALUE;
                long taskEnd = 0;
                long totalCpuTime = 0;

                for (SimulationSplit split : task.getSplits()) {
                    taskStart = Math.min(taskStart, split.getStartNanos());
                    taskEnd = Math.max(taskEnd, split.getDoneNanos());
                    totalCpuTime += MILLISECONDS.toNanos(split.getRequiredProcessMillis());
                }

                Duration taskDuration = new Duration(taskEnd - taskStart, NANOSECONDS).convertTo(MILLISECONDS);
                durationDistribution.add(taskDuration.toMillis());

                // scale by 100 before the cast, since Distribution stores long samples
                double taskParallelism = 1.0 * totalCpuTime / (taskEnd - taskStart);
                taskParallelismDistribution.add((long) (taskParallelism * 100));
            }

            System.out.println("Splits " + entry.getKey() + ": Completed " + entry.getValue().size());
            Map<Double, Long> durationPercentiles = durationDistribution.getPercentiles();
            System.out.printf(" wall time ms :: p01 %4s :: p05 %4s :: p10 %4s :: p25 %4s :: p50 %4s :: p75 %4s :: p90 %4s :: p95 %4s :: p99 %4s\n",
                    durationPercentiles.get(0.01),
                    durationPercentiles.get(0.05),
                    durationPercentiles.get(0.10),
                    durationPercentiles.get(0.25),
                    durationPercentiles.get(0.50),
                    durationPercentiles.get(0.75),
                    durationPercentiles.get(0.90),
                    durationPercentiles.get(0.95),
                    durationPercentiles.get(0.99));
            Map<Double, Long> parallelismPercentiles = taskParallelismDistribution.getPercentiles();
            System.out.printf(" parallelism :: p99 %4.2f :: p95 %4.2f :: p90 %4.2f :: p75 %4.2f :: p50 %4.2f :: p25 %4.2f :: p10 %4.2f :: p05 %4.2f :: p01 %4.2f\n",
                    parallelismPercentiles.get(0.99) / 100.0,
                    parallelismPercentiles.get(0.95) / 100.0,
                    parallelismPercentiles.get(0.90) / 100.0,
                    parallelismPercentiles.get(0.75) / 100.0,
                    parallelismPercentiles.get(0.50) / 100.0,
                    parallelismPercentiles.get(0.25) / 100.0,
                    parallelismPercentiles.get(0.10) / 100.0,
                    parallelismPercentiles.get(0.05) / 100.0,
                    parallelismPercentiles.get(0.01) / 100.0);
        }
    }

    Thread.sleep(10);
}
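
Distribution.getPercentiles() returns a Map<Double, Long> keyed by quantile, which is why both printf calls above look values up with literal doubles such as 0.25 and 0.99; the parallelism distribution scales its samples by 100 before adding them because Distribution only accepts long values. A small self-contained sketch of that readout, with made-up sample data:

import io.airlift.stats.Distribution;

import java.util.Map;

public class PercentileDemo
{
    public static void main(String[] args)
    {
        Distribution wallTimeMillis = new Distribution();

        // made-up samples: 1..1000 ms of simulated task wall time
        for (long ms = 1; ms <= 1000; ms++) {
            wallTimeMillis.add(ms);
        }

        // keys are quantiles; values are the sample values at those quantiles
        Map<Double, Long> percentiles = wallTimeMillis.getPercentiles();
        System.out.printf("p50 %s :: p90 %s :: p99 %s%n",
                percentiles.get(0.50),
                percentiles.get(0.90),
                percentiles.get(0.99));
    }
}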