Use of io.trino.execution.StageInfo in project trino by trinodb.
Class Query, method globalUniqueNodes.
private static Set<String> globalUniqueNodes(StageInfo stageInfo) {
    if (stageInfo == null) {
        return ImmutableSet.of();
    }
    ImmutableSet.Builder<String> nodes = ImmutableSet.builder();
    for (TaskInfo task : stageInfo.getTasks()) {
        // todo add nodeId to TaskInfo
        URI uri = task.getTaskStatus().getSelf();
        nodes.add(uri.getHost() + ":" + uri.getPort());
    }
    for (StageInfo subStage : stageInfo.getSubStages()) {
        nodes.addAll(globalUniqueNodes(subStage));
    }
    return nodes.build();
}
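The recursion mirrors the stage tree: each stage contributes the host:port of its own tasks and then merges the results of its sub-stages, with the ImmutableSet deduplicating nodes that run tasks for several stages. A minimal sketch of the same pattern, using a hypothetical Stage record instead of Trino's StageInfo/TaskInfo types:

// Simplified, hypothetical stand-in for the traversal above; Stage and its
// accessors are illustrative only and not part of Trino.
import com.google.common.collect.ImmutableSet;

import java.util.List;
import java.util.Set;

record Stage(List<String> taskHostPorts, List<Stage> subStages) {
    static Set<String> uniqueNodes(Stage stage) {
        if (stage == null) {
            return ImmutableSet.of();
        }
        ImmutableSet.Builder<String> nodes = ImmutableSet.builder();
        // each stage contributes the nodes of its own tasks...
        nodes.addAll(stage.taskHostPorts());
        // ...then recursively merges the nodes of its sub-stages
        for (Stage subStage : stage.subStages()) {
            nodes.addAll(uniqueNodes(subStage));
        }
        return nodes.build();
    }
}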
Use of io.trino.execution.StageInfo in project trino by trinodb.
Class Query, method toStatementStats.
private static StatementStats toStatementStats(QueryInfo queryInfo) {
    QueryStats queryStats = queryInfo.getQueryStats();
    StageInfo outputStage = queryInfo.getOutputStage().orElse(null);
    return StatementStats.builder()
            .setState(queryInfo.getState().toString())
            .setQueued(queryInfo.getState() == QueryState.QUEUED)
            .setScheduled(queryInfo.isScheduled())
            .setNodes(globalUniqueNodes(outputStage).size())
            .setTotalSplits(queryStats.getTotalDrivers())
            .setQueuedSplits(queryStats.getQueuedDrivers())
            .setRunningSplits(queryStats.getRunningDrivers() + queryStats.getBlockedDrivers())
            .setCompletedSplits(queryStats.getCompletedDrivers())
            .setCpuTimeMillis(queryStats.getTotalCpuTime().toMillis())
            .setWallTimeMillis(queryStats.getTotalScheduledTime().toMillis())
            .setQueuedTimeMillis(queryStats.getQueuedTime().toMillis())
            .setElapsedTimeMillis(queryStats.getElapsedTime().toMillis())
            .setProcessedRows(queryStats.getRawInputPositions())
            .setProcessedBytes(queryStats.getRawInputDataSize().toBytes())
            .setPhysicalInputBytes(queryStats.getPhysicalInputDataSize().toBytes())
            .setPeakMemoryBytes(queryStats.getPeakUserMemoryReservation().toBytes())
            .setSpilledBytes(queryStats.getSpilledDataSize().toBytes())
            .setRootStage(toStageStats(outputStage))
            .build();
}
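Most fields are copied one-to-one from QueryStats; the only derived values are the node count (via globalUniqueNodes on the output stage, which may be absent) and the running-splits figure, which folds blocked drivers into running drivers. A small illustration with made-up driver counts:

// Hypothetical driver counts, used only to illustrate the derived fields above.
static void illustrateSplitCounters() {
    long queuedDrivers = 10;
    long runningDrivers = 4;
    long blockedDrivers = 2;
    long completedDrivers = 84;
    long totalDrivers = 100;

    // Derived exactly as in toStatementStats: blocked drivers are reported
    // as part of the running splits.
    long runningSplits = runningDrivers + blockedDrivers; // 6
    long totalSplits = totalDrivers;                      // 100
    long queuedSplits = queuedDrivers;                    // 10
    long completedSplits = completedDrivers;              // 84
}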
Use of io.trino.execution.StageInfo in project trino by trinodb.
Class QueryMonitor, method logQueryTimeline.
private static void logQueryTimeline(QueryInfo queryInfo) {
    try {
        QueryStats queryStats = queryInfo.getQueryStats();
        DateTime queryStartTime = queryStats.getCreateTime();
        DateTime queryEndTime = queryStats.getEndTime();
        // query didn't finish cleanly
        if (queryStartTime == null || queryEndTime == null) {
            return;
        }
        // planning duration -- start to end of planning
        long planning = queryStats.getPlanningTime().toMillis();
        // time spent waiting for the required number of worker nodes to be present
        long waiting = queryStats.getResourceWaitingTime().toMillis();
        List<StageInfo> stages = getAllStages(queryInfo.getOutputStage());
        // long lastSchedulingCompletion = 0;
        long firstTaskStartTime = queryEndTime.getMillis();
        long lastTaskStartTime = queryStartTime.getMillis() + planning;
        long lastTaskEndTime = queryStartTime.getMillis() + planning;
        for (StageInfo stage : stages) {
            // only consider leaf stages
            if (!stage.getSubStages().isEmpty()) {
                continue;
            }
            for (TaskInfo taskInfo : stage.getTasks()) {
                TaskStats taskStats = taskInfo.getStats();
                DateTime firstStartTime = taskStats.getFirstStartTime();
                if (firstStartTime != null) {
                    firstTaskStartTime = Math.min(firstStartTime.getMillis(), firstTaskStartTime);
                }
                DateTime lastStartTime = taskStats.getLastStartTime();
                if (lastStartTime != null) {
                    lastTaskStartTime = max(lastStartTime.getMillis(), lastTaskStartTime);
                }
                DateTime endTime = taskStats.getEndTime();
                if (endTime != null) {
                    lastTaskEndTime = max(endTime.getMillis(), lastTaskEndTime);
                }
            }
        }
        long elapsed = max(queryEndTime.getMillis() - queryStartTime.getMillis(), 0);
        long scheduling = max(firstTaskStartTime - queryStartTime.getMillis() - planning, 0);
        long running = max(lastTaskEndTime - firstTaskStartTime, 0);
        long finishing = max(queryEndTime.getMillis() - lastTaskEndTime, 0);
        logQueryTimeline(
                queryInfo.getQueryId(),
                queryInfo.getSession().getTransactionId().map(TransactionId::toString).orElse(""),
                elapsed,
                planning,
                waiting,
                scheduling,
                running,
                finishing,
                queryStartTime,
                queryEndTime);
    } catch (Exception e) {
        log.error(e, "Error logging query timeline");
    }
}
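The timeline divides the wall-clock time of the query into planning, scheduling, running, and finishing phases, anchored on the earliest task start and the latest task end across leaf stages. A self-contained sketch of that arithmetic with made-up timestamps, expressed in milliseconds relative to the query start:

// Made-up timestamps, in milliseconds relative to the query start time.
static void illustrateTimelinePhases() {
    long queryStart = 0;
    long planning = 120;          // queryStats.getPlanningTime()
    long firstTaskStart = 180;    // earliest TaskStats.getFirstStartTime() over leaf stages
    long lastTaskEnd = 2_050;     // latest TaskStats.getEndTime() over leaf stages
    long queryEnd = 2_100;

    long elapsed = Math.max(queryEnd - queryStart, 0);                     // 2100
    long scheduling = Math.max(firstTaskStart - queryStart - planning, 0); // 60
    long running = Math.max(lastTaskEnd - firstTaskStart, 0);              // 1870
    long finishing = Math.max(queryEnd - lastTaskEnd, 0);                  // 50
}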
Use of io.trino.execution.StageInfo in project trino by trinodb.
Class PlanPrinter, method textDistributedPlan.
public static String textDistributedPlan(StageInfo outputStageInfo, QueryStats queryStats, ValuePrinter valuePrinter, boolean verbose) {
    Map<PlanNodeId, TableInfo> tableInfos = getAllStages(Optional.of(outputStageInfo)).stream()
            .map(StageInfo::getTables)
            .map(Map::entrySet)
            .flatMap(Collection::stream)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));
    StringBuilder builder = new StringBuilder();
    List<StageInfo> allStages = getAllStages(Optional.of(outputStageInfo));
    List<PlanFragment> allFragments = allStages.stream()
            .map(StageInfo::getPlan)
            .collect(toImmutableList());
    Map<PlanNodeId, PlanNodeStats> aggregatedStats = aggregateStageStats(allStages);
    Map<DynamicFilterId, DynamicFilterDomainStats> dynamicFilterDomainStats = queryStats.getDynamicFiltersStats().getDynamicFilterDomainStats().stream()
            .collect(toImmutableMap(DynamicFilterDomainStats::getDynamicFilterId, identity()));
    TypeProvider typeProvider = getTypeProvider(allFragments);
    for (StageInfo stageInfo : allStages) {
        builder.append(formatFragment(
                tableScanNode -> tableInfos.get(tableScanNode.getId()),
                dynamicFilterDomainStats,
                valuePrinter,
                stageInfo.getPlan(),
                Optional.of(stageInfo),
                Optional.of(aggregatedStats),
                verbose,
                typeProvider));
    }
    return builder.toString();
}
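The tableInfos map is built by flattening the per-stage table maps of every stage into one map keyed by plan node id. A simplified sketch of the same collector pattern, using plain strings in place of Trino's PlanNodeId and TableInfo (the mergeStageTables helper is hypothetical, not part of PlanPrinter):

import static com.google.common.collect.ImmutableMap.toImmutableMap;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

// Hypothetical helper: merges per-stage maps into a single immutable map.
static Map<String, String> mergeStageTables(List<Map<String, String>> perStageTables) {
    // toImmutableMap throws on duplicate keys, so plan node ids are assumed
    // to be unique across the stages of a single query plan.
    return perStageTables.stream()
            .map(Map::entrySet)
            .flatMap(Collection::stream)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));
}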
Use of io.trino.execution.StageInfo in project trino by trinodb.
Class StatsAndCosts, method reconstructStatsAndCosts.
private static void reconstructStatsAndCosts(
        StageInfo stage,
        ImmutableMap.Builder<PlanNodeId, PlanNodeStatsEstimate> planNodeStats,
        ImmutableMap.Builder<PlanNodeId, PlanCostEstimate> planNodeCosts) {
    PlanFragment planFragment = stage.getPlan();
    if (planFragment != null) {
        planNodeStats.putAll(planFragment.getStatsAndCosts().getStats());
        planNodeCosts.putAll(planFragment.getStatsAndCosts().getCosts());
    }
    for (StageInfo subStage : stage.getSubStages()) {
        reconstructStatsAndCosts(subStage, planNodeStats, planNodeCosts);
    }
}
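A hedged sketch of how such a helper is typically driven: seed two ImmutableMap builders, walk the stage tree starting from the output stage, and materialize the maps at the end. The surrounding method name below is an assumption, not Trino's actual call site:

// Hypothetical caller; only reconstructStatsAndCosts and the types it uses
// come from the snippet above.
private static void collectEstimates(StageInfo outputStage) {
    ImmutableMap.Builder<PlanNodeId, PlanNodeStatsEstimate> planNodeStats = ImmutableMap.builder();
    ImmutableMap.Builder<PlanNodeId, PlanCostEstimate> planNodeCosts = ImmutableMap.builder();
    reconstructStatsAndCosts(outputStage, planNodeStats, planNodeCosts);
    Map<PlanNodeId, PlanNodeStatsEstimate> stats = planNodeStats.build();
    Map<PlanNodeId, PlanCostEstimate> costs = planNodeCosts.build();
    // stats and costs now hold per-plan-node estimates for the whole stage tree
}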