Use of io.trino.Session in project trino by trinodb.
The class FaultTolerantStageScheduler, method schedule().
public synchronized void schedule()
        throws Exception
{
    if (failure != null) {
        propagateIfPossible(failure, Exception.class);
        throw new RuntimeException(failure);
    }

    if (closed) {
        return;
    }

    if (isFinished()) {
        return;
    }

    if (!blocked.isDone()) {
        return;
    }

    if (taskSource == null) {
        Map<PlanFragmentId, ListenableFuture<List<ExchangeSourceHandle>>> sourceHandles = sourceExchanges.entrySet().stream()
                .collect(toImmutableMap(Map.Entry::getKey, entry -> toListenableFuture(entry.getValue().getSourceHandles())));

        List<ListenableFuture<List<ExchangeSourceHandle>>> blockedFutures = sourceHandles.values().stream()
                .filter(future -> !future.isDone())
                .collect(toImmutableList());
        if (!blockedFutures.isEmpty()) {
            blocked = asVoid(allAsList(blockedFutures));
            return;
        }

        Multimap<PlanFragmentId, ExchangeSourceHandle> exchangeSources = sourceHandles.entrySet().stream()
                .collect(flatteningToImmutableListMultimap(Map.Entry::getKey, entry -> getFutureValue(entry.getValue()).stream()));

        taskSource = taskSourceFactory.create(
                session,
                stage.getFragment(),
                sourceExchanges,
                exchangeSources,
                stage::recordGetSplitTime,
                sourceBucketToPartitionMap,
                sourceBucketNodeMap);
    }

    while (!queuedPartitions.isEmpty() || !taskSource.isFinished()) {
        while (queuedPartitions.isEmpty() && !taskSource.isFinished()) {
            List<TaskDescriptor> tasks = taskSource.getMoreTasks();
            for (TaskDescriptor task : tasks) {
                queuedPartitions.add(task.getPartitionId());
                allPartitions.add(task.getPartitionId());
                taskDescriptorStorage.put(stage.getStageId(), task);
                sinkExchange.ifPresent(exchange -> {
                    ExchangeSinkHandle exchangeSinkHandle = exchange.addSink(task.getPartitionId());
                    partitionToExchangeSinkHandleMap.put(task.getPartitionId(), exchangeSinkHandle);
                });
            }
            if (taskSource.isFinished()) {
                sinkExchange.ifPresent(Exchange::noMoreSinks);
            }
        }

        if (queuedPartitions.isEmpty()) {
            break;
        }

        int partition = queuedPartitions.peek();
        Optional<TaskDescriptor> taskDescriptorOptional = taskDescriptorStorage.get(stage.getStageId(), partition);
        if (taskDescriptorOptional.isEmpty()) {
            // query has been terminated
            return;
        }
        TaskDescriptor taskDescriptor = taskDescriptorOptional.get();

        MemoryRequirements memoryRequirements = partitionMemoryRequirements.computeIfAbsent(
                partition,
                ignored -> partitionMemoryEstimator.getInitialMemoryRequirements(session, taskDescriptor.getNodeRequirements().getMemory()));
        if (nodeLease == null) {
            NodeRequirements nodeRequirements = taskDescriptor.getNodeRequirements();
            nodeRequirements = nodeRequirements.withMemory(memoryRequirements.getRequiredMemory());
            nodeLease = nodeAllocator.acquire(nodeRequirements);
        }
        if (!nodeLease.getNode().isDone()) {
            blocked = asVoid(nodeLease.getNode());
            return;
        }
        NodeInfo node = getFutureValue(nodeLease.getNode());

        queuedPartitions.poll();

        Multimap<PlanNodeId, Split> tableScanSplits = taskDescriptor.getSplits();
        Multimap<PlanNodeId, Split> remoteSplits = createRemoteSplits(taskDescriptor.getExchangeSourceHandles());
        Multimap<PlanNodeId, Split> taskSplits = ImmutableListMultimap.<PlanNodeId, Split>builder()
                .putAll(tableScanSplits)
                .putAll(remoteSplits)
                .build();

        int attemptId = getNextAttemptIdForPartition(partition);

        OutputBuffers outputBuffers;
        Optional<ExchangeSinkInstanceHandle> exchangeSinkInstanceHandle;
        if (sinkExchange.isPresent()) {
            ExchangeSinkHandle sinkHandle = partitionToExchangeSinkHandleMap.get(partition);
            exchangeSinkInstanceHandle = Optional.of(sinkExchange.get().instantiateSink(sinkHandle, attemptId));
            outputBuffers = createSpoolingExchangeOutputBuffers(exchangeSinkInstanceHandle.get());
        }
        else {
            exchangeSinkInstanceHandle = Optional.empty();
            // stage will be consumed by the coordinator using direct exchange
            outputBuffers = createInitialEmptyOutputBuffers(PARTITIONED)
                    .withBuffer(new OutputBuffers.OutputBufferId(0), 0)
                    .withNoMoreBufferIds();
        }

        Set<PlanNodeId> allSourcePlanNodeIds = ImmutableSet.<PlanNodeId>builder()
                .addAll(stage.getFragment().getPartitionedSources())
                .addAll(stage.getFragment().getRemoteSourceNodes().stream()
                        .map(RemoteSourceNode::getId)
                        .iterator())
                .build();

        RemoteTask task = stage.createTask(
                node.getNode(),
                partition,
                attemptId,
                sinkBucketToPartitionMap,
                outputBuffers,
                taskSplits,
                allSourcePlanNodeIds.stream().collect(toImmutableListMultimap(Function.identity(), planNodeId -> Lifespan.taskWide())),
                allSourcePlanNodeIds).orElseThrow(() -> new VerifyException("stage execution is expected to be active"));

        partitionToRemoteTaskMap.put(partition, task);
        runningTasks.put(task.getTaskId(), task);
        runningNodes.put(task.getTaskId(), nodeLease);
        nodeLease = null;

        if (taskFinishedFuture == null) {
            taskFinishedFuture = SettableFuture.create();
        }

        taskLifecycleListener.taskCreated(stage.getFragment().getId(), task);

        task.addStateChangeListener(taskStatus -> updateTaskStatus(taskStatus, exchangeSinkInstanceHandle));
        task.start();
    }

    if (taskFinishedFuture != null && !taskFinishedFuture.isDone()) {
        blocked = taskFinishedFuture;
    }
}
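The method is cooperative: whenever it cannot make progress (exchange source handles pending, no node lease granted, all tasks still running), it records a future in blocked and returns early. A minimal sketch of a driver for this pattern follows, assuming a hypothetical isBlocked() accessor that exposes the recorded future; the real scheduler wires this differently.

    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.MoreExecutors;

    class SchedulerDriver
    {
        private final FaultTolerantStageScheduler scheduler;

        SchedulerDriver(FaultTolerantStageScheduler scheduler)
        {
            this.scheduler = scheduler;
        }

        void scheduleLoop()
                throws Exception
        {
            scheduler.schedule();
            // isBlocked() is a hypothetical accessor for the "blocked" future set above
            ListenableFuture<Void> blocked = scheduler.isBlocked();
            if (!blocked.isDone()) {
                // re-enter schedule() once the obstacle (exchange handles, node lease,
                // or a finished task) clears
                blocked.addListener(() -> {
                    try {
                        scheduleLoop();
                    }
                    catch (Exception e) {
                        // in the real scheduler failures are recorded and rethrown on the next call
                        throw new RuntimeException(e);
                    }
                }, MoreExecutors.directExecutor());
            }
        }
    }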
Use of io.trino.Session in project trino by trinodb.
The class Analyzer, method analyze().
public Analysis analyze(Statement statement, QueryType queryType)
{
    Statement rewrittenStatement = statementRewrite.rewrite(analyzerFactory, session, statement, parameters, parameterLookup, warningCollector);
    Analysis analysis = new Analysis(rewrittenStatement, parameterLookup, queryType);
    StatementAnalyzer analyzer = statementAnalyzerFactory.createStatementAnalyzer(analysis, session, warningCollector, CorrelationSupport.ALLOWED);
    analyzer.analyze(rewrittenStatement, Optional.empty());

    // check column access permissions for each table
    analysis.getTableColumnReferences().forEach((accessControlInfo, tableColumnReferences) ->
            tableColumnReferences.forEach((tableName, columns) ->
                    accessControlInfo.getAccessControl().checkCanSelectFromColumns(
                            accessControlInfo.getSecurityContext(session.getRequiredTransactionId(), session.getQueryId()),
                            tableName,
                            columns)));
    return analysis;
}
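The final access-check loop is dense. The self-contained sketch below restates it with the Trino types replaced by plain stand-ins (the interface and method here are illustrative, not Trino's actual AccessControl SPI), to show its shape: one checkCanSelectFromColumns call per (access control, table) pair.

    import java.util.Map;
    import java.util.Set;

    class ColumnAccessChecks
    {
        // stand-in for Trino's access control interface
        interface AccessControl
        {
            void checkCanSelectFromColumns(String securityContext, String table, Set<String> columns);
        }

        // mirrors analysis.getTableColumnReferences(): access control -> table -> referenced columns
        static void checkAll(Map<AccessControl, Map<String, Set<String>>> tableColumnReferences, String securityContext)
        {
            tableColumnReferences.forEach((accessControl, tables) ->
                    tables.forEach((table, columns) ->
                            accessControl.checkCanSelectFromColumns(securityContext, table, columns)));
        }
    }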
Use of io.trino.Session in project trino by trinodb.
The class PushPredicateIntoTableScan, method pushFilterIntoTableScan().
public static Optional<PlanNode> pushFilterIntoTableScan(
        FilterNode filterNode,
        TableScanNode node,
        boolean pruneWithPredicateExpression,
        Session session,
        SymbolAllocator symbolAllocator,
        PlannerContext plannerContext,
        TypeAnalyzer typeAnalyzer,
        StatsProvider statsProvider,
        DomainTranslator domainTranslator)
{
    if (!isAllowPushdownIntoConnectors(session)) {
        return Optional.empty();
    }

    SplitExpression splitExpression = splitExpression(plannerContext, filterNode.getPredicate());

    DomainTranslator.ExtractionResult decomposedPredicate = DomainTranslator.getExtractionResult(
            plannerContext,
            session,
            splitExpression.getDeterministicPredicate(),
            symbolAllocator.getTypes());

    TupleDomain<ColumnHandle> newDomain = decomposedPredicate.getTupleDomain()
            .transformKeys(node.getAssignments()::get)
            .intersect(node.getEnforcedConstraint());

    Map<NodeRef<Expression>, Type> remainingExpressionTypes = typeAnalyzer.getTypes(session, symbolAllocator.getTypes(), decomposedPredicate.getRemainingExpression());
    Optional<ConnectorExpression> connectorExpression = new ConnectorExpressionTranslator.SqlToConnectorExpressionTranslator(session, remainingExpressionTypes, plannerContext)
            .process(decomposedPredicate.getRemainingExpression());
    Map<String, ColumnHandle> connectorExpressionAssignments = connectorExpression
            .map(ignored -> node.getAssignments().entrySet().stream()
                    .collect(toImmutableMap(entry -> entry.getKey().getName(), Map.Entry::getValue)))
            .orElse(ImmutableMap.of());

    Map<ColumnHandle, Symbol> assignments = ImmutableBiMap.copyOf(node.getAssignments()).inverse();

    Constraint constraint;
    // use evaluator only when there is some predicate which could not be translated into tuple domain
    if (pruneWithPredicateExpression && !TRUE_LITERAL.equals(decomposedPredicate.getRemainingExpression())) {
        LayoutConstraintEvaluator evaluator = new LayoutConstraintEvaluator(
                plannerContext,
                typeAnalyzer,
                session,
                symbolAllocator.getTypes(),
                node.getAssignments(),
                combineConjuncts(
                        plannerContext.getMetadata(),
                        splitExpression.getDeterministicPredicate(),
                        // Simplify the tuple domain to avoid creating an expression with too many nodes,
                        // which would be expensive to evaluate in the call to isCandidate below.
                        domainTranslator.toPredicate(session, newDomain.simplify().transformKeys(assignments::get))));
        constraint = new Constraint(newDomain, connectorExpression.orElse(TRUE), connectorExpressionAssignments, evaluator::isCandidate, evaluator.getArguments());
    }
    else {
        // Currently, invoking the expression interpreter is very expensive.
        // TODO invoke the interpreter unconditionally when the interpreter becomes cheap enough.
        constraint = new Constraint(newDomain, connectorExpression.orElse(TRUE), connectorExpressionAssignments);
    }

    // check if new domain is wider than domain already provided by table scan
    if (constraint.predicate().isEmpty() &&
            // TODO do we need to track enforced ConnectorExpression in TableScanNode?
            TRUE.equals(connectorExpression.orElse(TRUE)) &&
            newDomain.contains(node.getEnforcedConstraint())) {
        Expression resultingPredicate = createResultingPredicate(
                plannerContext,
                session,
                symbolAllocator,
                typeAnalyzer,
                splitExpression.getDynamicFilter(),
                TRUE_LITERAL,
                splitExpression.getNonDeterministicPredicate(),
                decomposedPredicate.getRemainingExpression());

        if (!TRUE_LITERAL.equals(resultingPredicate)) {
            return Optional.of(new FilterNode(filterNode.getId(), node, resultingPredicate));
        }

        return Optional.of(node);
    }

    if (newDomain.isNone()) {
        // to turn the subtree into a Values node
        return Optional.of(new ValuesNode(node.getId(), node.getOutputSymbols(), ImmutableList.of()));
    }

    Optional<ConstraintApplicationResult<TableHandle>> result = plannerContext.getMetadata().applyFilter(session, node.getTable(), constraint);
    if (result.isEmpty()) {
        return Optional.empty();
    }

    TableHandle newTable = result.get().getHandle();

    TableProperties newTableProperties = plannerContext.getMetadata().getTableProperties(session, newTable);
    Optional<TablePartitioning> newTablePartitioning = newTableProperties.getTablePartitioning();
    if (newTableProperties.getPredicate().isNone()) {
        return Optional.of(new ValuesNode(node.getId(), node.getOutputSymbols(), ImmutableList.of()));
    }

    TupleDomain<ColumnHandle> remainingFilter = result.get().getRemainingFilter();
    Optional<ConnectorExpression> remainingConnectorExpression = result.get().getRemainingExpression();
    boolean precalculateStatistics = result.get().isPrecalculateStatistics();

    verifyTablePartitioning(session, plannerContext.getMetadata(), node, newTablePartitioning);

    TableScanNode tableScan = new TableScanNode(
            node.getId(),
            newTable,
            node.getOutputSymbols(),
            node.getAssignments(),
            computeEnforced(newDomain, remainingFilter),
            // TODO (https://github.com/trinodb/trino/issues/8144) distinguish between predicate pushed down and remaining
            deriveTableStatisticsForPushdown(statsProvider, session, precalculateStatistics, filterNode),
            node.isUpdateTarget(),
            node.getUseConnectorNodePartitioning());

    Expression remainingDecomposedPredicate;
    if (remainingConnectorExpression.isEmpty() || remainingConnectorExpression.equals(connectorExpression)) {
        remainingDecomposedPredicate = decomposedPredicate.getRemainingExpression();
    }
    else {
        Map<String, Symbol> variableMappings = assignments.values().stream()
                .collect(toImmutableMap(Symbol::getName, Function.identity()));
        Expression translatedExpression = ConnectorExpressionTranslator.translate(session, remainingConnectorExpression.get(), plannerContext, variableMappings, new LiteralEncoder(plannerContext));

        if (connectorExpression.isEmpty()) {
            remainingDecomposedPredicate = ExpressionUtils.combineConjuncts(plannerContext.getMetadata(), translatedExpression, decomposedPredicate.getRemainingExpression());
        }
        else {
            remainingDecomposedPredicate = translatedExpression;
        }
    }

    Expression resultingPredicate = createResultingPredicate(
            plannerContext,
            session,
            symbolAllocator,
            typeAnalyzer,
            splitExpression.getDynamicFilter(),
            domainTranslator.toPredicate(session, remainingFilter.transformKeys(assignments::get)),
            splitExpression.getNonDeterministicPredicate(),
            remainingDecomposedPredicate);

    if (!TRUE_LITERAL.equals(resultingPredicate)) {
        return Optional.of(new FilterNode(filterNode.getId(), tableScan, resultingPredicate));
    }

    return Optional.of(tableScan);
}
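The early-return branch above hinges on TupleDomain algebra: the domain extracted from the filter predicate is intersected with the scan's enforced constraint, and pushdown is skipped when the result is no narrower than what the scan already enforces. A small sketch with trino-spi predicate types (the column name and values are chosen purely for illustration):

    import static io.trino.spi.type.BigintType.BIGINT;

    import com.google.common.collect.ImmutableMap;
    import io.trino.spi.predicate.Domain;
    import io.trino.spi.predicate.Range;
    import io.trino.spi.predicate.TupleDomain;
    import io.trino.spi.predicate.ValueSet;

    class TupleDomainPushdownSketch
    {
        public static void main(String[] args)
        {
            // what the scan already enforces: x > 0
            TupleDomain<String> enforced = TupleDomain.withColumnDomains(ImmutableMap.of(
                    "x", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 0L)), false)));

            // what the filter predicate decomposes to: x = 42
            TupleDomain<String> fromPredicate = TupleDomain.withColumnDomains(ImmutableMap.of(
                    "x", Domain.singleValue(BIGINT, 42L)));

            // the predicate's domain intersected with the enforced constraint, as in the rule
            TupleDomain<String> newDomain = fromPredicate.intersect(enforced);

            // false here: x = 42 is strictly narrower than x > 0, so there is
            // something new to offer the connector and applyFilter is attempted
            boolean nothingNewToPush = newDomain.contains(enforced);
            System.out.println(nothingNewToPush);
        }
    }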
Use of io.trino.Session in project trino by trinodb.
The class TaskResource, method createOrUpdateTask().
@ResourceSecurity(INTERNAL_ONLY)
@POST
@Path("{taskId}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public void createOrUpdateTask(
        @PathParam("taskId") TaskId taskId,
        TaskUpdateRequest taskUpdateRequest,
        @Context UriInfo uriInfo,
        @Suspended AsyncResponse asyncResponse)
{
    requireNonNull(taskUpdateRequest, "taskUpdateRequest is null");

    Session session = taskUpdateRequest.getSession().toSession(sessionPropertyManager, taskUpdateRequest.getExtraCredentials());

    if (injectFailure(session.getTraceToken(), taskId, RequestType.CREATE_OR_UPDATE_TASK, asyncResponse)) {
        return;
    }

    TaskInfo taskInfo = taskManager.updateTask(
            session,
            taskId,
            taskUpdateRequest.getFragment(),
            taskUpdateRequest.getSplitAssignments(),
            taskUpdateRequest.getOutputIds(),
            taskUpdateRequest.getDynamicFilterDomains());

    if (shouldSummarize(uriInfo)) {
        taskInfo = taskInfo.summarize();
    }

    asyncResponse.resume(Response.ok().entity(taskInfo).build());
}
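shouldSummarize is not shown in this excerpt. A plausible one-line implementation, keyed off a summarize query parameter, is sketched below; this is an assumption about its shape, and the actual helper in TaskResource may differ.

    import javax.ws.rs.core.UriInfo;

    final class TaskResourceSketch
    {
        private TaskResourceSketch() {}

        // a "summarize" parameter on the request URI switches the response to the compact TaskInfo form
        static boolean shouldSummarize(UriInfo uriInfo)
        {
            return uriInfo.getQueryParameters().containsKey("summarize");
        }
    }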
Use of io.trino.Session in project trino by trinodb.
The class DynamicFilterService, method getDynamicFilteringStats().
public DynamicFiltersStats getDynamicFilteringStats(QueryId queryId, Session session)
{
    DynamicFilterContext context = dynamicFilterContexts.get(queryId);
    if (context == null) {
        // query has been removed or dynamic filtering is not enabled
        return DynamicFiltersStats.EMPTY;
    }

    int lazyFilters = context.getLazyDynamicFilters().size();
    int replicatedFilters = context.getReplicatedDynamicFilters().size();
    int totalDynamicFilters = context.getTotalDynamicFilters();

    ConnectorSession connectorSession = session.toConnectorSession();
    List<DynamicFilterDomainStats> dynamicFilterDomainStats = context.getDynamicFilterSummaries().entrySet().stream()
            .map(entry -> {
                DynamicFilterId dynamicFilterId = entry.getKey();
                return new DynamicFilterDomainStats(
                        dynamicFilterId,
                        // use small limit for readability
                        entry.getValue().toString(connectorSession, 2),
                        context.getDynamicFilterCollectionDuration(dynamicFilterId));
            })
            .collect(toImmutableList());
    return new DynamicFiltersStats(
            dynamicFilterDomainStats,
            lazyFilters,
            replicatedFilters,
            totalDynamicFilters,
            dynamicFilterDomainStats.size());
}
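The toString(connectorSession, 2) call renders each dynamic filter's Domain with at most two values, which keeps the stats readable when a filter has collected many values. A short sketch of the same conversion, assuming a ConnectorSession obtained via session.toConnectorSession() as in the method above (the domain values are illustrative):

    import static io.trino.spi.type.BigintType.BIGINT;

    import com.google.common.collect.ImmutableList;
    import io.trino.spi.connector.ConnectorSession;
    import io.trino.spi.predicate.Domain;

    final class DomainRenderingSketch
    {
        private DomainRenderingSketch() {}

        static String summarize(ConnectorSession connectorSession)
        {
            Domain manyValues = Domain.multipleValues(BIGINT, ImmutableList.of(1L, 2L, 3L, 4L));
            // the limit of 2 truncates the rendered value list, mirroring the stats code above
            return manyValues.toString(connectorSession, 2);
        }
    }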