Use of io.trino.spi.QueryId in the Trino project (trinodb).
Class KillQueryProcedure, method killQuery:
@UsedByGeneratedCode
public void killQuery(String queryId, String message, ConnectorSession session) {
    // Kills a running query on behalf of the kill_query procedure.
    // Only valid on the coordinator, and only for callers permitted to kill
    // the target query's owner.
    QueryId query = parseQueryId(queryId);
    try {
        checkState(dispatchManager.isPresent(), "No dispatch manager is set. kill_query procedure should be executed on coordinator.");
        DispatchQuery dispatchQuery = dispatchManager.get().getQuery(query);
        checkCanKillQueryOwnedBy(((FullConnectorSession) session).getSession().getIdentity(), dispatchQuery.getSession().getIdentity(), accessControl);
        // check before killing to provide the proper error message (this is racy)
        if (dispatchQuery.isDone()) {
            throw new TrinoException(NOT_SUPPORTED, "Target query is not running: " + queryId);
        }
        dispatchQuery.fail(createKillQueryException(message));
        // verify if the query was killed (if not, we lost the race)
        checkState(dispatchQuery.isDone(), "Failure to fail the query: %s", query);
        // If the query finished on its own before fail() took effect, its error
        // code will not be ADMINISTRATIVELY_KILLED — report it as not running.
        if (!ADMINISTRATIVELY_KILLED.toErrorCode().equals(dispatchQuery.getErrorCode().orElse(null))) {
            throw new TrinoException(NOT_SUPPORTED, "Target query is not running: " + queryId);
        }
    } catch (NoSuchElementException e) {
        // Preserve the cause so the original stack trace is not lost.
        throw new TrinoException(NOT_FOUND, "Target query not found: " + queryId, e);
    }
}
Use of io.trino.spi.QueryId in the Trino project (trinodb).
Class TotalReservationOnBlockedNodesLowMemoryKiller, method chooseWholeQueryToKill:
/**
 * Picks the query to kill when the cluster is low on memory: the query with
 * the largest total memory reservation summed across all blocked nodes.
 * A node is considered blocked when its pool has neither free nor revocable
 * memory left.
 *
 * @param nodes memory snapshots of the cluster nodes
 * @return the query to kill, or empty if no node is blocked
 */
private Optional<KillTarget> chooseWholeQueryToKill(List<MemoryInfo> nodes) {
    Map<QueryId, Long> memoryReservationOnBlockedNodes = new HashMap<>();
    for (MemoryInfo node : nodes) {
        MemoryPoolInfo memoryPool = node.getPool();
        if (memoryPool == null) {
            continue;
        }
        if (memoryPool.getFreeBytes() + memoryPool.getReservedRevocableBytes() > 0) {
            // node still has memory available; not blocked
            continue;
        }
        // Sum this node's per-query reservations into the cluster-wide totals.
        // merge() is equivalent to the old compute()-with-null-check and matches
        // the accumulation style used in ClusterMemoryPool.update.
        memoryPool.getQueryMemoryReservations().forEach(
                (queryId, memoryReservation) -> memoryReservationOnBlockedNodes.merge(queryId, memoryReservation, Long::sum));
    }
    return memoryReservationOnBlockedNodes.entrySet().stream()
            .max(comparingLong(Map.Entry::getValue))
            .map(Map.Entry::getKey)
            .map(KillTarget::wholeQuery);
}
Use of io.trino.spi.QueryId in the Trino project (trinodb).
Class WorkerResource, method getThreads:
@ResourceSecurity(WEB_UI)
@GET
@Path("{nodeId}/task/{taskId}")
public Response getThreads(@PathParam("taskId") TaskId task, @PathParam("nodeId") String nodeId, @Context HttpServletRequest servletRequest, @Context HttpHeaders httpHeaders) {
    // Proxies the task detail endpoint of the given worker node, after
    // verifying the caller is allowed to view the owning query.
    QueryId queryId = task.getQueryId();
    Optional<QueryInfo> queryInfo = dispatchManager.getFullQueryInfo(queryId);
    if (queryInfo.isEmpty()) {
        // Query is no longer known to the coordinator.
        return Response.status(Status.GONE).build();
    }
    try {
        checkCanViewQueryOwnedBy(sessionContextFactory.extractAuthorizedIdentity(servletRequest, httpHeaders, alternateHeaderName), queryInfo.get().getSession().toIdentity(), accessControl);
        return proxyJsonResponse(nodeId, "v1/task/" + task);
    } catch (AccessDeniedException e) {
        throw new ForbiddenException();
    }
}
Use of io.trino.spi.QueryId in the Trino project (trinodb).
Class ClusterMemoryPool, method update:
/**
 * Recomputes the cluster-wide memory statistics from fresh per-node snapshots.
 * All aggregate counters and per-query maps are reset first, then rebuilt by
 * summing the contribution of every node's memory pool.
 */
public synchronized void update(List<MemoryInfo> memoryInfos, int assignedQueries) {
    // Reset aggregates before re-accumulating.
    nodes = 0;
    blockedNodes = 0;
    totalDistributedBytes = 0;
    reservedDistributedBytes = 0;
    reservedRevocableDistributedBytes = 0;
    this.assignedQueries = assignedQueries;
    this.queryMemoryReservations.clear();
    this.queryMemoryAllocations.clear();
    this.queryMemoryRevocableReservations.clear();
    for (MemoryInfo memoryInfo : memoryInfos) {
        MemoryPoolInfo pool = memoryInfo.getPool();
        nodes++;
        // A node counts as blocked when it has neither free nor revocable memory.
        if (pool.getFreeBytes() + pool.getReservedRevocableBytes() <= 0) {
            blockedNodes++;
        }
        totalDistributedBytes += pool.getMaxBytes();
        reservedDistributedBytes += pool.getReservedBytes();
        reservedRevocableDistributedBytes += pool.getReservedRevocableBytes();
        // Fold this node's per-query statistics into the cluster-wide maps.
        pool.getQueryMemoryReservations().forEach(
                (queryId, bytes) -> queryMemoryReservations.merge(queryId, bytes, Long::sum));
        pool.getQueryMemoryAllocations().forEach(
                (queryId, allocations) -> queryMemoryAllocations.merge(queryId, allocations, this::mergeQueryAllocations));
        pool.getQueryMemoryRevocableReservations().forEach(
                (queryId, bytes) -> queryMemoryRevocableReservations.merge(queryId, bytes, Long::sum));
    }
}
Use of io.trino.spi.QueryId in the Trino project (trinodb).
Class PhasedExecutionSchedule, method extractDependenciesAndReturnNonLazyFragments:
// Builds the fragment dependency graph for a query's stages and returns the
// fragments that must be started eagerly (non-lazy). Returns an empty set when
// there are no stages.
private Set<PlanFragmentId> extractDependenciesAndReturnNonLazyFragments(Collection<StageExecution> stages) {
if (stages.isEmpty()) {
return ImmutableSet.of();
}
// All stages belong to the same query, so any stage yields the query id.
QueryId queryId = stages.stream().map(stage -> stage.getStageId().getQueryId()).findAny().orElseThrow();
List<PlanFragment> fragments = stages.stream().map(StageExecution::getFragment).collect(toImmutableList());
// Build a graph where the plan fragments are vertexes and the edges represent
// a before -> after relationship. Destination fragment should be started only
// when source fragment is completed. For example, a join hash build has an edge
// to the join probe.
Visitor visitor = new Visitor(queryId, fragments);
visitor.processAllFragments();
// Make sure there are no strongly connected components as it would mean circular dependency between stages
// (in a DAG every strongly connected component is a single vertex, hence the size comparison).
List<Set<PlanFragmentId>> components = new StrongConnectivityInspector<>(fragmentDependency).stronglyConnectedSets();
verify(components.size() == fragmentDependency.vertexSet().size(), "circular dependency between stages");
return visitor.getNonLazyFragments();
}
Aggregations