Example 6 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

In the class PipelinedStageExecution, method sourceTaskCreated:

private synchronized void sourceTaskCreated(PlanFragmentId fragmentId, RemoteTask sourceTask) {
    requireNonNull(fragmentId, "fragmentId is null");
    RemoteSourceNode remoteSource = exchangeSources.get(fragmentId);
    checkArgument(remoteSource != null, "Unknown remote source %s. Known sources are %s", fragmentId, exchangeSources.keySet());
    sourceTasks.put(fragmentId, sourceTask);
    OutputBufferManager outputBufferManager = outputBufferManagers.get(fragmentId);
    sourceTask.setOutputBuffers(outputBufferManager.getOutputBuffers());
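    // notify every task already running in this stage about the new upstream task by adding an exchange split for it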
    for (RemoteTask destinationTask : getAllTasks()) {
        destinationTask.addSplits(ImmutableMultimap.of(remoteSource.getId(), createExchangeSplit(sourceTask, destinationTask)));
    }
}
Also used: RemoteSourceNode (io.trino.sql.planner.plan.RemoteSourceNode), RemoteTask (io.trino.execution.RemoteTask)

Example 7 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

In the class ScaledWriterScheduler, method scheduleTasks:

private List<RemoteTask> scheduleTasks(int count) {
    if (count == 0) {
        return ImmutableList.of();
    }
    List<InternalNode> nodes = nodeSelector.selectRandomNodes(count, scheduledNodes);
    checkCondition(!scheduledNodes.isEmpty() || !nodes.isEmpty(), NO_NODES_AVAILABLE, "No nodes available to run query");
    ImmutableList.Builder<RemoteTask> tasks = ImmutableList.builder();
    for (InternalNode node : nodes) {
        Optional<RemoteTask> remoteTask = stage.scheduleTask(node, scheduledNodes.size(), ImmutableMultimap.of(), ImmutableMultimap.of());
        remoteTask.ifPresent(task -> {
            tasks.add(task);
            scheduledNodes.add(node);
        });
    }
    return tasks.build();
}
Also used: ImmutableList (com.google.common.collect.ImmutableList), RemoteTask (io.trino.execution.RemoteTask), InternalNode (io.trino.metadata.InternalNode)
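
One detail worth noting above: a node is added to scheduledNodes only when stage.scheduleTask actually returns a task, so a failed placement does not consume a writer slot. Below is a minimal, self-contained sketch of that pattern; Node, Task, and Stage are hypothetical stand-ins, not the Trino types.

import com.google.common.collect.ImmutableList;

import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;

class ScaleOutSketch {
    interface Node {}

    interface Task {}

    interface Stage {
        // may return empty when the node cannot accept another task
        Optional<Task> scheduleTask(Node node, int partition);
    }

    private final Set<Node> scheduledNodes = new HashSet<>();
    private final Stage stage;

    ScaleOutSketch(Stage stage) {
        this.stage = stage;
    }

    List<Task> scheduleTasks(List<Node> candidates) {
        ImmutableList.Builder<Task> tasks = ImmutableList.builder();
        for (Node node : candidates) {
            if (scheduledNodes.contains(node)) {
                // this node already runs a writer task for the stage
                continue;
            }
            // record the node as used only if task creation succeeded
            stage.scheduleTask(node, scheduledNodes.size()).ifPresent(task -> {
                tasks.add(task);
                scheduledNodes.add(node);
            });
        }
        return tasks.build();
    }
}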

Example 8 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

In the class SourcePartitionedScheduler, method schedule:

@Override
public synchronized ScheduleResult schedule() {
    dropListenersFromWhenFinishedOrNewLifespansAdded();
    int overallSplitAssignmentCount = 0;
    ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
    List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
    boolean anyBlockedOnPlacements = false;
    boolean anyBlockedOnNextSplitBatch = false;
    boolean anyNotBlocked = false;
    for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
        Lifespan lifespan = entry.getKey();
        ScheduleGroup scheduleGroup = entry.getValue();
        Set<Split> pendingSplits = scheduleGroup.pendingSplits;
        if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) {
            verify(scheduleGroup.nextSplitBatchFuture == null);
        } else if (pendingSplits.isEmpty()) {
            // try to get the next batch
            if (scheduleGroup.nextSplitBatchFuture == null) {
                scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize - pendingSplits.size());
                long start = System.nanoTime();
                addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stageExecution.recordGetSplitTime(start));
            }
            if (scheduleGroup.nextSplitBatchFuture.isDone()) {
                SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
                scheduleGroup.nextSplitBatchFuture = null;
                pendingSplits.addAll(nextSplits.getSplits());
                if (nextSplits.isLastBatch()) {
                    if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && pendingSplits.isEmpty()) {
                        // Add an empty split in case no splits have been produced for the source.
                        // For source operators, they never take input, but they may produce output.
                        // This is well handled by the execution engine.
                        // However, there are certain non-source operators that may produce output without any input,
                        // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
                        // Scheduling an empty split kicks off necessary driver instantiation to make this work.
                        pendingSplits.add(new Split(splitSource.getCatalogName(), new EmptySplit(splitSource.getCatalogName()), lifespan));
                    }
                    scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
                }
            } else {
                overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
                anyBlockedOnNextSplitBatch = true;
                continue;
            }
        }
        Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of();
        if (!pendingSplits.isEmpty()) {
            if (!scheduleGroup.placementFuture.isDone()) {
                anyBlockedOnPlacements = true;
                continue;
            }
            if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
                scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
            }
            if (state == State.INITIALIZED) {
                state = State.SPLITS_ADDED;
            }
            // calculate placements for splits
            SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(pendingSplits);
            splitAssignment = splitPlacementResult.getAssignments();
            // remove splits with successful placements
            // AbstractSet.removeAll performs terribly here.
            splitAssignment.values().forEach(pendingSplits::remove);
            overallSplitAssignmentCount += splitAssignment.size();
            // if not completely placed, mark scheduleGroup as blocked on placement
            if (!pendingSplits.isEmpty()) {
                scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
                overallBlockedFutures.add(scheduleGroup.placementFuture);
                anyBlockedOnPlacements = true;
            }
        }
        // if no new splits will be assigned, update state and attach completion event
        Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
        if (pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
            scheduleGroup.state = ScheduleGroupState.DONE;
            if (!lifespan.isTaskWide()) {
                InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId());
                noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
            }
        }
        // assign the splits with successful placements
        overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));
        // To avoid busy loops (the placement future can be complete even when splits could not be placed), we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
        if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) {
            anyNotBlocked = true;
        }
    }
    // A schedule group that has not yet realized there are no more splits will do so on its next getNextBatch call; however, that invocation will fail if we tear down splitSource now.
    if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (noMoreScheduleGroups && scheduleGroups.isEmpty() && splitSource.isFinished())) {
        switch(state) {
            case INITIALIZED:
                // No split has been scheduled yet, but this shouldn't be possible. See usage of EmptySplit in this method.
                throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
            case SPLITS_ADDED:
                state = State.NO_MORE_SPLITS;
                Optional<List<Object>> tableExecuteSplitsInfo = splitSource.getTableExecuteSplitsInfo();
                // Here we assume that non-empty tableExecuteSplitsInfo is only possible for queries that use a single split source.
                // TODO support grouped execution
                tableExecuteSplitsInfo.ifPresent(info -> {
                    TableExecuteContext tableExecuteContext = tableExecuteContextManager.getTableExecuteContextForQuery(stageExecution.getStageId().getQueryId());
                    tableExecuteContext.setSplitsInfo(info);
                });
                splitSource.close();
            // fall through
            case NO_MORE_SPLITS:
                state = State.FINISHED;
                whenFinishedOrNewLifespanAdded.set(null);
            // fall through
            case FINISHED:
                return new ScheduleResult(true, overallNewTasks.build(), overallSplitAssignmentCount);
        }
        throw new IllegalStateException("Unknown state");
    }
    if (anyNotBlocked) {
        return new ScheduleResult(false, overallNewTasks.build(), overallSplitAssignmentCount);
    }
    boolean anySourceTaskBlocked = this.anySourceTaskBlocked.getAsBoolean();
    if (anySourceTaskBlocked) {
        // Dynamic filters might not be collected due to build side source tasks being blocked on full buffer.
        // In such case probe split generation that is waiting for dynamic filters should be unblocked to prevent deadlock.
        dynamicFilterService.unblockStageDynamicFilters(stageExecution.getStageId().getQueryId(), stageExecution.getAttemptId(), stageExecution.getFragment());
    }
    if (groupedExecution) {
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    } else if (anyBlockedOnPlacements && anySourceTaskBlocked) {
        // In a broadcast join, output buffers of the tasks in build source stage have to
        // hold onto all data produced before probe side task scheduling finishes,
        // even if the data is acknowledged by all known consumers. This is because
        // new consumers may be added until the probe side task scheduling finishes.
        // 
        // As a result, the following line is necessary to prevent deadlock
        // due to neither build nor probe can make any progress.
        // The build side blocks due to a full output buffer.
        // In the meantime the probe side split cannot be consumed since
        // builder side hash table construction has not finished.
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    }
    ScheduleResult.BlockedReason blockedReason;
    if (anyBlockedOnNextSplitBatch) {
        blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE;
    } else {
        blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
    }
    overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
    return new ScheduleResult(false, overallNewTasks.build(), nonCancellationPropagating(asVoid(whenAnyComplete(overallBlockedFutures))), blockedReason, overallSplitAssignmentCount);
}
Also used: ArrayList (java.util.ArrayList), SplitBatch (io.trino.split.SplitSource.SplitBatch), ImmutableSet.toImmutableSet (com.google.common.collect.ImmutableSet.toImmutableSet), ImmutableSet (com.google.common.collect.ImmutableSet), ImmutableList (com.google.common.collect.ImmutableList), List (java.util.List), EmptySplit (io.trino.split.EmptySplit), RemoteTask (io.trino.execution.RemoteTask), TableExecuteContext (io.trino.execution.TableExecuteContext), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), InternalNode (io.trino.metadata.InternalNode), Split (io.trino.metadata.Split), Lifespan (io.trino.execution.Lifespan), BucketedSplitPlacementPolicy (io.trino.execution.scheduler.FixedSourcePartitionedScheduler.BucketedSplitPlacementPolicy)
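
The switch at the end is a deliberate fall-through: a single call can move the scheduler from SPLITS_ADDED through NO_MORE_SPLITS to FINISHED before returning. A stripped-down illustration of that idiom follows; the State enum here is a hypothetical stand-in, not the Trino class.

class FallThroughSketch {
    enum State {
        INITIALIZED, SPLITS_ADDED, NO_MORE_SPLITS, FINISHED
    }

    private State state = State.SPLITS_ADDED;

    String finish() {
        switch (state) {
            case INITIALIZED:
                // nothing was ever scheduled; treat as a bug
                throw new IllegalStateException("At least 1 split should have been scheduled");
            case SPLITS_ADDED:
                // release per-state resources here, then keep advancing
                state = State.NO_MORE_SPLITS;
            // fall through
            case NO_MORE_SPLITS:
                state = State.FINISHED;
            // fall through
            case FINISHED:
                return "finished";
        }
        throw new IllegalStateException("Unknown state");
    }
}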

Example 9 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

In the class SourcePartitionedScheduler, method assignSplits:

private Set<RemoteTask> assignSplits(Multimap<InternalNode, Split> splitAssignment, Multimap<InternalNode, Lifespan> noMoreSplitsNotification) {
    ImmutableSet.Builder<RemoteTask> newTasks = ImmutableSet.builder();
    ImmutableSet<InternalNode> nodes = ImmutableSet.<InternalNode>builder().addAll(splitAssignment.keySet()).addAll(noMoreSplitsNotification.keySet()).build();
    for (InternalNode node : nodes) {
        // source partitioned tasks can only receive broadcast data; otherwise they would have a different distribution
        ImmutableMultimap<PlanNodeId, Split> splits = ImmutableMultimap.<PlanNodeId, Split>builder().putAll(partitionedNode, splitAssignment.get(node)).build();
        ImmutableMultimap.Builder<PlanNodeId, Lifespan> noMoreSplits = ImmutableMultimap.builder();
        if (noMoreSplitsNotification.containsKey(node)) {
            noMoreSplits.putAll(partitionedNode, noMoreSplitsNotification.get(node));
        }
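        // reuse the task already scheduled on this node if there is one; otherwise schedule a new task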
        RemoteTask task = scheduledTasks.get(node);
        if (task != null) {
            task.addSplits(splits);
            noMoreSplits.build().forEach(task::noMoreSplits);
        } else {
            scheduleTask(node, splits, noMoreSplits.build()).ifPresent(newTasks::add);
        }
    }
    return newTasks.build();
}
Also used: RemoteTask (io.trino.execution.RemoteTask), PlanNodeId (io.trino.sql.planner.plan.PlanNodeId), ImmutableSet.toImmutableSet (com.google.common.collect.ImmutableSet.toImmutableSet), ImmutableSet (com.google.common.collect.ImmutableSet), InternalNode (io.trino.metadata.InternalNode), ImmutableMultimap (com.google.common.collect.ImmutableMultimap), EmptySplit (io.trino.split.EmptySplit), Split (io.trino.metadata.Split), Lifespan (io.trino.execution.Lifespan)
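
The method relies on a couple of Guava multimap idioms: the node set is the union of the keys of both notification multimaps, and the per-node noMoreSplits multimap is dispatched entry by entry through Multimap.forEach. A tiny runnable sketch of just that plumbing, with made-up node and group names:

import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;

class MultimapDispatchSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> splitsByNode = ImmutableMultimap.of("node1", 1, "node1", 2, "node2", 3);
        Multimap<String, String> noMoreSplitsByNode = ImmutableMultimap.of("node3", "groupA");

        // visit every node that appears in either notification
        ImmutableSet<String> nodes = ImmutableSet.<String>builder()
                .addAll(splitsByNode.keySet())
                .addAll(noMoreSplitsByNode.keySet())
                .build();

        for (String node : nodes) {
            // splits for this node, keyed by a (made-up) plan node id
            ImmutableMultimap<String, Integer> splits = ImmutableMultimap.<String, Integer>builder()
                    .putAll("planNode", splitsByNode.get(node))
                    .build();

            ImmutableMultimap.Builder<String, String> noMoreSplits = ImmutableMultimap.builder();
            if (noMoreSplitsByNode.containsKey(node)) {
                noMoreSplits.putAll("planNode", noMoreSplitsByNode.get(node));
            }

            System.out.println(node + " splits: " + splits);
            // forEach delivers one (key, value) call per entry, like task::noMoreSplits above
            noMoreSplits.build().forEach((planNodeId, group) ->
                    System.out.println(node + " no more splits: " + planNodeId + "/" + group));
        }
    }
}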

Example 10 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

In the class TestHttpRemoteTask, method testOutboundDynamicFilters:

@Test(timeOut = 30_000)
public void testOutboundDynamicFilters() throws Exception {
    DynamicFilterId filterId1 = new DynamicFilterId("df1");
    DynamicFilterId filterId2 = new DynamicFilterId("df2");
    SymbolAllocator symbolAllocator = new SymbolAllocator();
    Symbol symbol1 = symbolAllocator.newSymbol("DF_SYMBOL1", BIGINT);
    Symbol symbol2 = symbolAllocator.newSymbol("DF_SYMBOL2", BIGINT);
    SymbolReference df1 = symbol1.toSymbolReference();
    SymbolReference df2 = symbol2.toSymbolReference();
    ColumnHandle handle1 = new TestingColumnHandle("column1");
    ColumnHandle handle2 = new TestingColumnHandle("column2");
    QueryId queryId = new QueryId("test");
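    // set up a testing task resource and a dynamic filter service backed by a direct executor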
    TestingTaskResource testingTaskResource = new TestingTaskResource(new AtomicLong(System.nanoTime()), FailureScenario.NO_FAILURE);
    DynamicFilterService dynamicFilterService = new DynamicFilterService(PLANNER_CONTEXT.getMetadata(), PLANNER_CONTEXT.getFunctionManager(), new TypeOperators(), newDirectExecutorService());
    dynamicFilterService.registerQuery(queryId, TEST_SESSION, ImmutableSet.of(filterId1, filterId2), ImmutableSet.of(filterId1, filterId2), ImmutableSet.of());
    dynamicFilterService.stageCannotScheduleMoreTasks(new StageId(queryId, 1), 0, 1);
    DynamicFilter dynamicFilter = dynamicFilterService.createDynamicFilter(queryId, ImmutableList.of(new DynamicFilters.Descriptor(filterId1, df1), new DynamicFilters.Descriptor(filterId2, df2)), ImmutableMap.of(symbol1, handle1, symbol2, handle2), symbolAllocator.getTypes());
    // make sure initial dynamic filter is collected
    CompletableFuture<?> future = dynamicFilter.isBlocked();
    dynamicFilterService.addTaskDynamicFilters(new TaskId(new StageId(queryId.getId(), 1), 1, 0), ImmutableMap.of(filterId1, Domain.singleValue(BIGINT, 1L)));
    future.get();
    assertEquals(dynamicFilter.getCurrentPredicate(), TupleDomain.withColumnDomains(ImmutableMap.of(handle1, Domain.singleValue(BIGINT, 1L))));
    // Create remote task after dynamic filter is created to simulate new nodes joining
    HttpRemoteTaskFactory httpRemoteTaskFactory = createHttpRemoteTaskFactory(testingTaskResource, dynamicFilterService);
    RemoteTask remoteTask = createRemoteTask(httpRemoteTaskFactory, ImmutableSet.of(filterId1, filterId2));
    testingTaskResource.setInitialTaskInfo(remoteTask.getTaskInfo());
    remoteTask.start();
    assertEventually(new Duration(10, SECONDS), () -> assertEquals(testingTaskResource.getDynamicFiltersSentCounter(), 1L));
    assertEquals(testingTaskResource.getCreateOrUpdateCounter(), 1L);
    // schedule a couple of splits to trigger task updates
    addSplit(remoteTask, testingTaskResource, 1);
    addSplit(remoteTask, testingTaskResource, 2);
    // make sure dynamic filter was sent in task updates only once
    assertEquals(testingTaskResource.getDynamicFiltersSentCounter(), 1L);
    assertEquals(testingTaskResource.getCreateOrUpdateCounter(), 3L);
    assertEquals(testingTaskResource.getLatestDynamicFilterFromCoordinator(), ImmutableMap.of(filterId1, Domain.singleValue(BIGINT, 1L)));
    future = dynamicFilter.isBlocked();
    dynamicFilterService.addTaskDynamicFilters(new TaskId(new StageId(queryId.getId(), 1), 1, 0), ImmutableMap.of(filterId2, Domain.singleValue(BIGINT, 2L)));
    future.get();
    assertEquals(dynamicFilter.getCurrentPredicate(), TupleDomain.withColumnDomains(ImmutableMap.of(handle1, Domain.singleValue(BIGINT, 1L), handle2, Domain.singleValue(BIGINT, 2L))));
    // dynamic filter should be sent even though there were no further splits scheduled
    assertEventually(new Duration(10, SECONDS), () -> assertEquals(testingTaskResource.getDynamicFiltersSentCounter(), 2L));
    assertEquals(testingTaskResource.getCreateOrUpdateCounter(), 4L);
    // previously sent dynamic filter should not be repeated
    assertEquals(testingTaskResource.getLatestDynamicFilterFromCoordinator(), ImmutableMap.of(filterId2, Domain.singleValue(BIGINT, 2L)));
    httpRemoteTaskFactory.stop();
    dynamicFilterService.stop();
}
Also used: SymbolAllocator (io.trino.sql.planner.SymbolAllocator), TestingColumnHandle (io.trino.spi.connector.TestingColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), TaskId (io.trino.execution.TaskId), DynamicFilter (io.trino.spi.connector.DynamicFilter), Symbol (io.trino.sql.planner.Symbol), SymbolReference (io.trino.sql.tree.SymbolReference), QueryId (io.trino.spi.QueryId), StageId (io.trino.execution.StageId), RemoteTask (io.trino.execution.RemoteTask), Duration (io.airlift.units.Duration), AtomicLong (java.util.concurrent.atomic.AtomicLong), HttpRemoteTaskFactory (io.trino.server.HttpRemoteTaskFactory), DynamicFilterService (io.trino.server.DynamicFilterService), DynamicFilterId (io.trino.sql.planner.plan.DynamicFilterId), TypeOperators (io.trino.spi.type.TypeOperators), Test (org.testng.annotations.Test)
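
The test waits for the asynchronous dynamic filter round trip using assertEventually. A bare-bones stand-in for such a helper, a sketch of the idea rather than the Airlift/Trino implementation, could look like this:

import java.time.Duration;
import java.time.Instant;

final class EventuallySketch {
    // poll the assertion until it passes or the timeout expires
    static void assertEventually(Duration timeout, Runnable assertion)
            throws InterruptedException {
        Instant deadline = Instant.now().plus(timeout);
        while (true) {
            try {
                assertion.run();
                return;
            }
            catch (AssertionError e) {
                if (Instant.now().isAfter(deadline)) {
                    throw e;
                }
                Thread.sleep(50);
            }
        }
    }
}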

Aggregations

RemoteTask (io.trino.execution.RemoteTask): 26
InternalNode (io.trino.metadata.InternalNode): 11
Test (org.testng.annotations.Test): 11
NodeTaskMap (io.trino.execution.NodeTaskMap): 9
ImmutableList (com.google.common.collect.ImmutableList): 7
MockRemoteTask (io.trino.execution.MockRemoteTaskFactory.MockRemoteTask): 7
PartitionedSplitsInfo (io.trino.execution.PartitionedSplitsInfo): 7
PipelinedStageExecution.createPipelinedStageExecution (io.trino.execution.scheduler.PipelinedStageExecution.createPipelinedStageExecution): 7
SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler (io.trino.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler): 7
Split (io.trino.metadata.Split): 7
PlanFragment (io.trino.sql.planner.PlanFragment): 7
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 5
List (java.util.List): 5
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 4
ImmutableSet (com.google.common.collect.ImmutableSet): 4
Duration (io.airlift.units.Duration): 4
Lifespan (io.trino.execution.Lifespan): 4
TaskId (io.trino.execution.TaskId): 4
Optional (java.util.Optional): 4
Preconditions.checkArgument (com.google.common.base.Preconditions.checkArgument): 3