Use of io.airlift.units.Duration in project presto by prestodb.
The class PerfTest, method executeQueries:
private static void executeQueries(List<String> queries, ParallelQueryRunner parallelQueryRunner, int parallelism)
        throws Exception
{
    Duration duration = parallelQueryRunner.executeCommands(parallelism, queries);
    // convertTo(SECONDS) re-expresses the elapsed Duration in seconds for display
    System.out.printf("%2d: %s\n", parallelism, duration.convertTo(TimeUnit.SECONDS));
}
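Here executeCommands hands back the elapsed time as a Duration, and convertTo re-expresses it for printing. A minimal, self-contained sketch of the same idiom, assuming nothing from Presto beyond the public airlift Duration API (the class name and the Thread.sleep workload are placeholders):

import io.airlift.units.Duration;

import java.util.concurrent.TimeUnit;

public class DurationConversionSketch
{
    public static void main(String[] args)
            throws Exception
    {
        long start = System.nanoTime();
        Thread.sleep(1500); // placeholder for the timed work

        // nanosSince builds a Duration from a System.nanoTime() reading
        Duration duration = Duration.nanosSince(start);

        // convertTo returns the same span re-expressed in seconds; toString renders it like "1.50s"
        System.out.printf("%2d: %s%n", 8, duration.convertTo(TimeUnit.SECONDS));
    }
}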
Use of io.airlift.units.Duration in project presto by prestodb.
The class StatusPrinter, method printStageTree:
private void printStageTree(StageStats stage, String indent, AtomicInteger stageNumberCounter)
{
    // nanosSince is statically imported from io.airlift.units.Duration; 'start' is the query start nanos
    Duration elapsedTime = nanosSince(start);

    // STAGE  S    ROWS  ROWS/s  BYTES  BYTES/s  QUEUED    RUN   DONE
    // 0......Q     26M   9077M  9993G    9077M   9077M  9077M  9077M
    //   2....R     17K    627M   673M     627M    627M   627M   627M
    //     3..C     999    627M   673M     627M    627M   627M   627M
    //   4....R     26M    627M   673T     627M    627M   627M   627M
    //     5..F     29T    627M   673M     627M    627M   627M   627M
    String id = String.valueOf(stageNumberCounter.getAndIncrement());
    String name = indent + id;
    name += Strings.repeat(".", max(0, 10 - name.length()));

    String bytesPerSecond;
    String rowsPerSecond;
    if (stage.isDone()) {
        // A finished stage reports a zero rate instead of dividing by the total elapsed time
        bytesPerSecond = formatDataRate(new DataSize(0, BYTE), new Duration(0, SECONDS), false);
        rowsPerSecond = formatCountRate(0, new Duration(0, SECONDS), false);
    }
    else {
        bytesPerSecond = formatDataRate(bytes(stage.getProcessedBytes()), elapsedTime, false);
        rowsPerSecond = formatCountRate(stage.getProcessedRows(), elapsedTime, false);
    }

    String stageSummary = String.format("%10s%1s  %5s  %6s  %5s  %7s  %6s  %5s  %5s",
            name,
            stageStateCharacter(stage.getState()),
            formatCount(stage.getProcessedRows()),
            rowsPerSecond,
            formatDataSize(bytes(stage.getProcessedBytes()), false),
            bytesPerSecond,
            stage.getQueuedSplits(),
            stage.getRunningSplits(),
            stage.getCompletedSplits());
    reprintLine(stageSummary);

    for (StageStats subStage : stage.getSubStages()) {
        printStageTree(subStage, indent + "  ", stageNumberCounter);
    }
}
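formatDataRate and formatCountRate divide the processed volume by the elapsed Duration, which is why the done branch pins the rate to zero by passing a zero Duration. A rough sketch of that arithmetic using only the public airlift API, with a hypothetical dataRate helper standing in for Presto's formatter:

import io.airlift.units.DataSize;
import io.airlift.units.Duration;

import java.util.concurrent.TimeUnit;

public class RateSketch
{
    // Hypothetical stand-in for formatDataRate: bytes per elapsed second, rendered succinctly
    static String dataRate(DataSize size, Duration elapsed)
    {
        double seconds = elapsed.getValue(TimeUnit.SECONDS);
        if (seconds == 0) {
            return "0B/s";
        }
        return DataSize.succinctBytes(Math.round(size.toBytes() / seconds)) + "/s";
    }

    public static void main(String[] args)
    {
        Duration elapsed = new Duration(2, TimeUnit.SECONDS);
        DataSize processed = new DataSize(10, DataSize.Unit.MEGABYTE);
        System.out.println(dataRate(processed, elapsed)); // 10MB over 2s prints "5MB/s"
    }
}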
Use of io.airlift.units.Duration in project presto by prestodb.
The class PlanPrinter, method getPlanNodeStats:
private static List<PlanNodeStats> getPlanNodeStats(TaskStats taskStats)
{
    // Best effort to reconstruct the plan nodes from operators.
    // Because stats are collected separately from query execution,
    // it's possible that some or all of them are missing or out of date.
    // For example, a LIMIT clause can cause a query to finish before stats
    // are collected from the leaf stages.
    Map<PlanNodeId, Long> planNodeInputPositions = new HashMap<>();
    Map<PlanNodeId, Long> planNodeInputBytes = new HashMap<>();
    Map<PlanNodeId, Long> planNodeOutputPositions = new HashMap<>();
    Map<PlanNodeId, Long> planNodeOutputBytes = new HashMap<>();
    Map<PlanNodeId, Long> planNodeWallMillis = new HashMap<>();
    Map<PlanNodeId, Map<String, OperatorInputStats>> operatorInputStats = new HashMap<>();
    Map<PlanNodeId, Map<String, OperatorHashCollisionsStats>> operatorHashCollisionsStats = new HashMap<>();

    for (PipelineStats pipelineStats : taskStats.getPipelines()) {
        // Stats are collected with only eventual consistency, so these could be empty
        if (pipelineStats.getOperatorSummaries().isEmpty()) {
            continue;
        }

        Set<PlanNodeId> processedNodes = new HashSet<>();
        PlanNodeId inputPlanNode = pipelineStats.getOperatorSummaries().iterator().next().getPlanNodeId();
        PlanNodeId outputPlanNode = getLast(pipelineStats.getOperatorSummaries()).getPlanNodeId();

        // Gather input statistics
        for (OperatorStats operatorStats : pipelineStats.getOperatorSummaries()) {
            PlanNodeId planNodeId = operatorStats.getPlanNodeId();

            long wall = operatorStats.getAddInputWall().toMillis()
                    + operatorStats.getGetOutputWall().toMillis()
                    + operatorStats.getFinishWall().toMillis();
            planNodeWallMillis.merge(planNodeId, wall, Long::sum);

            // A pipeline like a hash build before a join might link to other "internal" pipelines
            // which provide the actual input for this plan node
            if (operatorStats.getPlanNodeId().equals(inputPlanNode) && !pipelineStats.isInputPipeline()) {
                continue;
            }
            if (processedNodes.contains(planNodeId)) {
                continue;
            }

            operatorInputStats.merge(
                    planNodeId,
                    ImmutableMap.of(
                            operatorStats.getOperatorType(),
                            new OperatorInputStats(
                                    operatorStats.getTotalDrivers(),
                                    operatorStats.getInputPositions(),
                                    operatorStats.getSumSquaredInputPositions())),
                    PlanPrinter::mergeOperatorInputStatsMaps);

            if (operatorStats.getInfo() instanceof HashCollisionsInfo) {
                HashCollisionsInfo hashCollisionsInfo = (HashCollisionsInfo) operatorStats.getInfo();
                operatorHashCollisionsStats.merge(
                        planNodeId,
                        ImmutableMap.of(
                                operatorStats.getOperatorType(),
                                new OperatorHashCollisionsStats(
                                        hashCollisionsInfo.getWeightedHashCollisions(),
                                        hashCollisionsInfo.getWeightedSumSquaredHashCollisions(),
                                        hashCollisionsInfo.getWeightedExpectedHashCollisions())),
                        PlanPrinter::mergeOperatorHashCollisionsStatsMaps);
            }

            planNodeInputPositions.merge(planNodeId, operatorStats.getInputPositions(), Long::sum);
            planNodeInputBytes.merge(planNodeId, operatorStats.getInputDataSize().toBytes(), Long::sum);
            processedNodes.add(planNodeId);
        }

        // Gather output statistics
        processedNodes.clear();
        for (OperatorStats operatorStats : reverse(pipelineStats.getOperatorSummaries())) {
            PlanNodeId planNodeId = operatorStats.getPlanNodeId();

            // An "internal" pipeline, like a hash build, links to another pipeline which is the actual output for this plan node
            if (operatorStats.getPlanNodeId().equals(outputPlanNode) && !pipelineStats.isOutputPipeline()) {
                continue;
            }
            if (processedNodes.contains(planNodeId)) {
                continue;
            }

            planNodeOutputPositions.merge(planNodeId, operatorStats.getOutputPositions(), Long::sum);
            planNodeOutputBytes.merge(planNodeId, operatorStats.getOutputDataSize().toBytes(), Long::sum);
            processedNodes.add(planNodeId);
        }
    }

    List<PlanNodeStats> stats = new ArrayList<>();
    for (Map.Entry<PlanNodeId, Long> entry : planNodeWallMillis.entrySet()) {
        PlanNodeId planNodeId = entry.getKey();
        stats.add(new PlanNodeStats(
                planNodeId,
                new Duration(entry.getValue(), MILLISECONDS),
                planNodeInputPositions.get(planNodeId),
                succinctDataSize(planNodeInputBytes.get(planNodeId), BYTE),
                // Nodes whose output stats never arrived only have wall time, but no output stats
                planNodeOutputPositions.getOrDefault(planNodeId, 0L),
                succinctDataSize(planNodeOutputBytes.getOrDefault(planNodeId, 0L), BYTE),
                operatorInputStats.get(planNodeId),
                // Only some operators emit hash collisions statistics
                operatorHashCollisionsStats.getOrDefault(planNodeId, emptyMap())));
    }
    return stats;
}
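Wall time is accumulated per node as a plain long via Map.merge and only wrapped into a Duration for the final PlanNodeStats. A small self-contained sketch of that accumulate-then-wrap pattern, with a made-up node key:

import io.airlift.units.Duration;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class WallTimeSketch
{
    public static void main(String[] args)
    {
        Map<String, Long> wallMillisByNode = new HashMap<>();

        // Two operators report against the same (hypothetical) plan node; merge sums their millis
        wallMillisByNode.merge("node-1", new Duration(1.5, TimeUnit.SECONDS).toMillis(), Long::sum);
        wallMillisByNode.merge("node-1", new Duration(500, TimeUnit.MILLISECONDS).toMillis(), Long::sum);

        // Wrap the total back into a Duration for reporting
        Duration total = new Duration(wallMillisByNode.get("node-1"), TimeUnit.MILLISECONDS);
        System.out.println(total.convertTo(TimeUnit.SECONDS)); // prints "2.00s"
    }
}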
Use of io.airlift.units.Duration in project presto by prestodb.
The class IterativeOptimizer, method optimize:
@Override
public PlanNode optimize(PlanNode plan, Session session, Map<Symbol, Type> types, SymbolAllocator symbolAllocator, PlanNodeIdAllocator idAllocator)
{
    if (!SystemSessionProperties.isNewOptimizerEnabled(session)) {
        for (PlanOptimizer optimizer : legacyRules) {
            plan = optimizer.optimize(plan, session, symbolAllocator.getTypes(), symbolAllocator, idAllocator);
        }
        return plan;
    }

    Memo memo = new Memo(idAllocator, plan);

    Lookup lookup = node -> {
        if (node instanceof GroupReference) {
            return memo.getNode(((GroupReference) node).getGroupId());
        }
        return node;
    };

    Duration timeout = SystemSessionProperties.getOptimizerTimeout(session);
    exploreGroup(memo.getRootGroup(), new Context(memo, lookup, idAllocator, symbolAllocator, System.nanoTime(), timeout.toMillis()));

    return memo.extract();
}
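The session's timeout Duration is reduced to a millisecond budget once and passed into the Context next to a System.nanoTime() start stamp, so exploration can bail out when the budget is exhausted. A sketch of that check in isolation (the sleep loop is a placeholder, not the optimizer's rule engine):

import io.airlift.units.Duration;

import java.util.concurrent.TimeUnit;

public class OptimizerTimeoutSketch
{
    public static void main(String[] args)
            throws Exception
    {
        Duration timeout = new Duration(200, TimeUnit.MILLISECONDS);
        long startNanos = System.nanoTime();
        long timeoutMillis = timeout.toMillis();

        while (true) {
            // The same elapsed-versus-budget comparison the start stamp and timeout enable
            if (Duration.nanosSince(startNanos).toMillis() >= timeoutMillis) {
                System.out.println("timeout reached after " + Duration.nanosSince(startNanos));
                break;
            }
            Thread.sleep(10); // placeholder for one rule-application pass
        }
    }
}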
Use of io.airlift.units.Duration in project presto by prestodb.
The class TestCachingHiveMetastore, method setUp:
@BeforeMethod
public void setUp()
        throws Exception
{
    mockClient = new MockHiveMetastoreClient();
    MockHiveCluster mockHiveCluster = new MockHiveCluster(mockClient);
    ListeningExecutorService executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed("test-%s")));
    ThriftHiveMetastore thriftHiveMetastore = new ThriftHiveMetastore(mockHiveCluster);
    metastore = new CachingHiveMetastore(
            new BridgingHiveMetastore(thriftHiveMetastore),
            executor,
            new Duration(5, TimeUnit.MINUTES),
            new Duration(1, TimeUnit.MINUTES),
            1000);
    stats = thriftHiveMetastore.getStats();
}
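The two Duration arguments configure how long metastore entries stay cached and how often they are refreshed. CachingHiveMetastore is backed by Guava caches, so Durations like these end up as the (long, TimeUnit) pairs CacheBuilder expects; a sketch of that translation, assuming only Guava and the airlift Duration API (the cache key and value below are invented):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import io.airlift.units.Duration;

import java.util.concurrent.TimeUnit;

public class CacheTtlSketch
{
    public static void main(String[] args)
    {
        Duration cacheTtl = new Duration(5, TimeUnit.MINUTES);

        // Duration decomposes cleanly into the (long, TimeUnit) pair CacheBuilder wants
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .expireAfterWrite(cacheTtl.toMillis(), TimeUnit.MILLISECONDS)
                .maximumSize(1000)
                .build();

        cache.put("default.orders", "table metadata"); // invented key/value
        System.out.println("TTL " + cacheTtl + " -> " + cache.getIfPresent("default.orders"));
    }
}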