
Example 96 with PlanNodeId

Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.

From class CountAggregationBenchmark, method createOperatorFactories.

@Override
protected List<? extends OperatorFactory> createOperatorFactories() {
    OperatorFactory tableScanOperator = createTableScanOperator(0, new PlanNodeId("test"), "orders", "orderkey");
    AggregationOperatorFactory aggregationOperator = new AggregationOperatorFactory(1, new PlanNodeId("test"), ImmutableList.of(countFunction.bind(ImmutableList.of(0))));
    return ImmutableList.of(tableScanOperator, aggregationOperator);
}
Also used : PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) AggregationOperatorFactory(io.trino.operator.AggregationOperator.AggregationOperatorFactory) OperatorFactory(io.trino.operator.OperatorFactory)
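
PlanNodeId itself is just a string-valued identifier; the scan and aggregation factories above are both tagged with PlanNodeId("test") so that their runtime stats attach to the same plan node. Below is a minimal standalone sketch of that idea; the value-based equality shown is an assumption, consistent with how PlanNodeId is used as a map key in the later examples.

import io.trino.sql.planner.plan.PlanNodeId;

public class PlanNodeIdSketch {
    public static void main(String[] args) {
        // Two ids built from the same string should refer to the same plan node.
        PlanNodeId scanId = new PlanNodeId("test");
        PlanNodeId sameId = new PlanNodeId("test");
        PlanNodeId otherId = new PlanNodeId("other");
        System.out.println(scanId);                 // prints the wrapped id (expected "test")
        System.out.println(scanId.equals(sameId));  // expected true (assumed value equality)
        System.out.println(scanId.equals(otherId)); // false
    }
}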

Example 97 with PlanNodeId

Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.

From class SqlTaskExecution, method schedulePartitionedSource.

private synchronized void schedulePartitionedSource(SplitAssignment splitAssignmentUpdate) {
    mergeIntoPendingSplits(splitAssignmentUpdate.getPlanNodeId(), splitAssignmentUpdate.getSplits(), splitAssignmentUpdate.getNoMoreSplitsForLifespan(), splitAssignmentUpdate.isNoMoreSplits());
    while (true) {
        // SchedulingLifespanManager tracks how far each Lifespan has been scheduled. Here is an example.
        // Let's say there are 4 source pipelines/nodes: A, B, C, and D, in scheduling order.
        // And we're processing 3 concurrent lifespans at a time. In this case, we could have
        // 
        // * Lifespan 10:  A   B  [C]  D; i.e. pipelines A and B have finished scheduling (but not necessarily finished running).
        // * Lifespan 20: [A]  B   C   D
        // * Lifespan 30:  A  [B]  C   D
        // 
        // To recap, SchedulingLifespanManager records the next scheduling source node for each lifespan.
        Iterator<SchedulingLifespan> activeLifespans = schedulingLifespanManager.getActiveLifespans();
        boolean madeProgress = false;
        while (activeLifespans.hasNext()) {
            SchedulingLifespan schedulingLifespan = activeLifespans.next();
            Lifespan lifespan = schedulingLifespan.getLifespan();
            // A lifespan may already have finished scheduling all of its plan nodes; this is why getSchedulingPlanNode returns an Optional.
            while (true) {
                Optional<PlanNodeId> optionalSchedulingPlanNode = schedulingLifespan.getSchedulingPlanNode();
                if (optionalSchedulingPlanNode.isEmpty()) {
                    break;
                }
                PlanNodeId schedulingPlanNode = optionalSchedulingPlanNode.get();
                DriverSplitRunnerFactory partitionedDriverRunnerFactory = driverRunnerFactoriesWithSplitLifeCycle.get(schedulingPlanNode);
                PendingSplits pendingSplits = pendingSplitsByPlanNode.get(schedulingPlanNode).getLifespan(lifespan);
                // Enqueue driver runners with driver group lifecycle for this driver life cycle, if not already enqueued.
                if (!lifespan.isTaskWide() && !schedulingLifespan.getAndSetDriversForDriverGroupLifeCycleScheduled()) {
                    scheduleDriversForDriverGroupLifeCycle(lifespan);
                }
                // Enqueue driver runners with split lifecycle for this plan node and driver life cycle combination.
                ImmutableList.Builder<DriverSplitRunner> runners = ImmutableList.builder();
                for (ScheduledSplit scheduledSplit : pendingSplits.removeAllSplits()) {
                    // create a new driver for the split
                    runners.add(partitionedDriverRunnerFactory.createDriverRunner(scheduledSplit, lifespan));
                }
                enqueueDriverSplitRunner(false, runners.build());
                // Only move on to the next plan node once no more splits will arrive for this plan node and lifespan.
                if (pendingSplits.getState() != NO_MORE_SPLITS) {
                    break;
                }
                partitionedDriverRunnerFactory.noMoreDriverRunner(ImmutableList.of(lifespan));
                pendingSplits.markAsCleanedUp();
                schedulingLifespan.nextPlanNode();
                madeProgress = true;
                if (schedulingLifespan.isDone()) {
                    break;
                }
            }
        }
        if (!madeProgress) {
            break;
        }
    }
    if (splitAssignmentUpdate.isNoMoreSplits()) {
        schedulingLifespanManager.noMoreSplits(splitAssignmentUpdate.getPlanNodeId());
    }
}
Also used : PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) ImmutableList(com.google.common.collect.ImmutableList)
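
The comment block inside schedulePartitionedSource describes a round-robin walk: each lifespan remembers the next source plan node it still has to schedule, and the outer loop keeps sweeping all active lifespans until a full pass makes no progress. The sketch below reproduces only that control flow with hypothetical simplified types; SimpleLifespan and the plain string keys are not Trino classes.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class LifespanSchedulingSketch {
    // Hypothetical stand-in for SchedulingLifespan: remembers the next source
    // plan node still to be scheduled for this lifespan.
    static class SimpleLifespan {
        final String name;
        int nextPlanNode;

        SimpleLifespan(String name) {
            this.name = name;
        }
    }

    public static void main(String[] args) {
        List<String> planNodes = List.of("A", "B", "C", "D");
        List<SimpleLifespan> lifespans = List.of(new SimpleLifespan("10"), new SimpleLifespan("20"));

        // Splits delivered so far, keyed by "lifespan:planNode".
        Map<String, Deque<String>> pendingSplits = new HashMap<>();
        pendingSplits.put("10:A", new ArrayDeque<>(List.of("split-1", "split-2")));
        pendingSplits.put("20:A", new ArrayDeque<>(List.of("split-3")));

        // Plan nodes that will receive no further splits for a given lifespan.
        Set<String> noMoreSplits = Set.of("10:A", "10:B", "20:A");

        while (true) {
            boolean madeProgress = false;
            for (SimpleLifespan lifespan : lifespans) {
                while (lifespan.nextPlanNode < planNodes.size()) {
                    String node = planNodes.get(lifespan.nextPlanNode);
                    String key = lifespan.name + ":" + node;
                    Deque<String> splits = pendingSplits.getOrDefault(key, new ArrayDeque<>());
                    while (!splits.isEmpty()) {
                        System.out.println("lifespan " + lifespan.name + ": run " + splits.poll() + " on node " + node);
                    }
                    if (!noMoreSplits.contains(key)) {
                        // This plan node may still receive splits; stop here and revisit
                        // this lifespan on a later pass (the bracketed position in the comment above).
                        break;
                    }
                    // All splits for this plan node are enqueued; advance to the next plan node.
                    lifespan.nextPlanNode++;
                    madeProgress = true;
                }
            }
            if (!madeProgress) {
                break;
            }
        }
    }
}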

Example 98 with PlanNodeId

Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.

From class ExtractSpatialJoins, method tryCreateSpatialJoin.

private static Result tryCreateSpatialJoin(Context context, JoinNode joinNode, Expression filter, PlanNodeId nodeId, List<Symbol> outputSymbols, ComparisonExpression spatialComparison, PlannerContext plannerContext, SplitManager splitManager, PageSourceManager pageSourceManager, TypeAnalyzer typeAnalyzer) {
    PlanNode leftNode = joinNode.getLeft();
    PlanNode rightNode = joinNode.getRight();
    List<Symbol> leftSymbols = leftNode.getOutputSymbols();
    List<Symbol> rightSymbols = rightNode.getOutputSymbols();
    Expression radius;
    Optional<Symbol> newRadiusSymbol;
    ComparisonExpression newComparison;
    if (spatialComparison.getOperator() == LESS_THAN || spatialComparison.getOperator() == LESS_THAN_OR_EQUAL) {
        // ST_Distance(a, b) <= r
        radius = spatialComparison.getRight();
        Set<Symbol> radiusSymbols = extractUnique(radius);
        if (radiusSymbols.isEmpty() || (rightSymbols.containsAll(radiusSymbols) && containsNone(leftSymbols, radiusSymbols))) {
            newRadiusSymbol = newRadiusSymbol(context, radius);
            newComparison = new ComparisonExpression(spatialComparison.getOperator(), spatialComparison.getLeft(), toExpression(newRadiusSymbol, radius));
        } else {
            return Result.empty();
        }
    } else {
        // r >= ST_Distance(a, b)
        radius = spatialComparison.getLeft();
        Set<Symbol> radiusSymbols = extractUnique(radius);
        if (radiusSymbols.isEmpty() || (rightSymbols.containsAll(radiusSymbols) && containsNone(leftSymbols, radiusSymbols))) {
            newRadiusSymbol = newRadiusSymbol(context, radius);
            newComparison = new ComparisonExpression(spatialComparison.getOperator().flip(), spatialComparison.getRight(), toExpression(newRadiusSymbol, radius));
        } else {
            return Result.empty();
        }
    }
    Expression newFilter = replaceExpression(filter, ImmutableMap.of(spatialComparison, newComparison));
    PlanNode newRightNode = newRadiusSymbol.map(symbol -> addProjection(context, rightNode, symbol, radius)).orElse(rightNode);
    JoinNode newJoinNode = new JoinNode(joinNode.getId(), joinNode.getType(), leftNode, newRightNode, joinNode.getCriteria(), joinNode.getLeftOutputSymbols(), joinNode.getRightOutputSymbols(), joinNode.isMaySkipOutputDuplicates(), Optional.of(newFilter), joinNode.getLeftHashSymbol(), joinNode.getRightHashSymbol(), joinNode.getDistributionType(), joinNode.isSpillable(), joinNode.getDynamicFilters(), joinNode.getReorderJoinStatsAndCost());
    return tryCreateSpatialJoin(context, newJoinNode, newFilter, nodeId, outputSymbols, (FunctionCall) newComparison.getLeft(), Optional.of(newComparison.getRight()), plannerContext, splitManager, pageSourceManager, typeAnalyzer);
}
Also used : EMPTY(io.trino.spi.connector.DynamicFilter.EMPTY) SpatialJoinUtils.extractSupportedSpatialComparisons(io.trino.util.SpatialJoinUtils.extractSupportedSpatialComparisons) SymbolsExtractor.extractUnique(io.trino.sql.planner.SymbolsExtractor.extractUnique) SplitBatch(io.trino.split.SplitSource.SplitBatch) SplitManager(io.trino.split.SplitManager) SystemSessionProperties.getSpatialPartitioningTableName(io.trino.SystemSessionProperties.getSpatialPartitioningTableName) FilterNode(io.trino.sql.planner.plan.FilterNode) PlanNode(io.trino.sql.planner.plan.PlanNode) LEFT(io.trino.sql.planner.plan.JoinNode.Type.LEFT) PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) Map(java.util.Map) SpatialJoinNode(io.trino.sql.planner.plan.SpatialJoinNode) ConnectorPageSource(io.trino.spi.connector.ConnectorPageSource) JoinNode(io.trino.sql.planner.plan.JoinNode) INTEGER(io.trino.spi.type.IntegerType.INTEGER) Splitter(com.google.common.base.Splitter) FunctionCall(io.trino.sql.tree.FunctionCall) Patterns.join(io.trino.sql.planner.plan.Patterns.join) TypeSignature(io.trino.spi.type.TypeSignature) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) TypeSignatureTranslator.toSqlType(io.trino.sql.analyzer.TypeSignatureTranslator.toSqlType) KdbTree(io.trino.geospatial.KdbTree) Assignments(io.trino.sql.planner.plan.Assignments) Set(java.util.Set) TrinoException(io.trino.spi.TrinoException) ArrayType(io.trino.spi.type.ArrayType) SplitSource(io.trino.split.SplitSource) Context(io.trino.sql.planner.iterative.Rule.Context) ComparisonExpression(io.trino.sql.tree.ComparisonExpression) String.format(java.lang.String.format) Constraint.alwaysTrue(io.trino.spi.connector.Constraint.alwaysTrue) LESS_THAN_OR_EQUAL(io.trino.sql.tree.ComparisonExpression.Operator.LESS_THAN_OR_EQUAL) UncheckedIOException(java.io.UncheckedIOException) List(java.util.List) INVALID_SPATIAL_PARTITIONING(io.trino.spi.StandardErrorCode.INVALID_SPATIAL_PARTITIONING) NOT_PARTITIONED(io.trino.spi.connector.NotPartitionedPartitionHandle.NOT_PARTITIONED) Pattern(io.trino.matching.Pattern) SymbolReference(io.trino.sql.tree.SymbolReference) Split(io.trino.metadata.Split) DynamicFilter(io.trino.spi.connector.DynamicFilter) Optional(java.util.Optional) ExpressionNodeInliner.replaceExpression(io.trino.sql.planner.ExpressionNodeInliner.replaceExpression) Expression(io.trino.sql.tree.Expression) Session(io.trino.Session) PlannerContext(io.trino.sql.PlannerContext) Iterables(com.google.common.collect.Iterables) INNER(io.trino.sql.planner.plan.JoinNode.Type.INNER) Type(io.trino.spi.type.Type) Patterns.filter(io.trino.sql.planner.plan.Patterns.filter) Page(io.trino.spi.Page) Capture.newCapture(io.trino.matching.Capture.newCapture) Cast(io.trino.sql.tree.Cast) KdbTreeUtils(io.trino.geospatial.KdbTreeUtils) VARCHAR(io.trino.spi.type.VarcharType.VARCHAR) FunctionCallBuilder(io.trino.sql.planner.FunctionCallBuilder) ImmutableList(com.google.common.collect.ImmutableList) Verify.verify(com.google.common.base.Verify.verify) UNGROUPED_SCHEDULING(io.trino.spi.connector.ConnectorSplitManager.SplitSchedulingStrategy.UNGROUPED_SCHEDULING) Objects.requireNonNull(java.util.Objects.requireNonNull) Result(io.trino.sql.planner.iterative.Rule.Result) ColumnHandle(io.trino.spi.connector.ColumnHandle) Rule(io.trino.sql.planner.iterative.Rule) Lifespan(io.trino.execution.Lifespan) 
ProjectNode(io.trino.sql.planner.plan.ProjectNode) Symbol(io.trino.sql.planner.Symbol) StringLiteral(io.trino.sql.tree.StringLiteral) SystemSessionProperties.isSpatialJoinEnabled(io.trino.SystemSessionProperties.isSpatialJoinEnabled) IOException(java.io.IOException) PageSourceManager(io.trino.split.PageSourceManager) LESS_THAN(io.trino.sql.tree.ComparisonExpression.Operator.LESS_THAN) MoreFutures.getFutureValue(io.airlift.concurrent.MoreFutures.getFutureValue) UnnestNode(io.trino.sql.planner.plan.UnnestNode) Capture(io.trino.matching.Capture) QualifiedName(io.trino.sql.tree.QualifiedName) DOUBLE(io.trino.spi.type.DoubleType.DOUBLE) TableHandle(io.trino.metadata.TableHandle) TypeAnalyzer(io.trino.sql.planner.TypeAnalyzer) QualifiedObjectName(io.trino.metadata.QualifiedObjectName) Patterns.source(io.trino.sql.planner.plan.Patterns.source) Captures(io.trino.matching.Captures) Metadata(io.trino.metadata.Metadata) VisibleForTesting(com.google.common.annotations.VisibleForTesting) TypeManager(io.trino.spi.type.TypeManager) SpatialJoinUtils.extractSupportedSpatialFunctions(io.trino.util.SpatialJoinUtils.extractSupportedSpatialFunctions)
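
The two branches in tryCreateSpatialJoin differ only in which side of the comparison carries the radius: when the radius is on the left (r >= ST_Distance(a, b)), the operands are swapped and the operator is flipped so the rewritten filter always keeps the distance call on the left. A small sketch of that flip, using a hypothetical operator enum rather than Trino's ComparisonExpression.Operator:

public class ComparisonFlipSketch {
    // Hypothetical simplification of ComparisonExpression.Operator.flip():
    // the operator that keeps the comparison equivalent when the operands swap sides.
    enum Operator {
        LESS_THAN, LESS_THAN_OR_EQUAL, GREATER_THAN, GREATER_THAN_OR_EQUAL;

        Operator flip() {
            switch (this) {
                case LESS_THAN:
                    return GREATER_THAN;
                case LESS_THAN_OR_EQUAL:
                    return GREATER_THAN_OR_EQUAL;
                case GREATER_THAN:
                    return LESS_THAN;
                default:
                    return LESS_THAN_OR_EQUAL;
            }
        }
    }

    public static void main(String[] args) {
        // "r >= ST_Distance(a, b)" is rewritten as "ST_Distance(a, b) <= r":
        // swap the operands and flip the operator so the distance call stays on the left.
        Operator original = Operator.GREATER_THAN_OR_EQUAL;
        System.out.println(original + " flips to " + original.flip());
    }
}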

Example 99 with PlanNodeId

Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.

From class EliminateCrossJoins, method buildJoinTree.

public static PlanNode buildJoinTree(List<Symbol> expectedOutputSymbols, JoinGraph graph, List<Integer> joinOrder, PlanNodeIdAllocator idAllocator) {
    requireNonNull(expectedOutputSymbols, "expectedOutputSymbols is null");
    requireNonNull(idAllocator, "idAllocator is null");
    requireNonNull(graph, "graph is null");
    joinOrder = ImmutableList.copyOf(requireNonNull(joinOrder, "joinOrder is null"));
    checkArgument(joinOrder.size() >= 2);
    PlanNode result = graph.getNode(joinOrder.get(0));
    Set<PlanNodeId> alreadyJoinedNodes = new HashSet<>();
    alreadyJoinedNodes.add(result.getId());
    for (int i = 1; i < joinOrder.size(); i++) {
        PlanNode rightNode = graph.getNode(joinOrder.get(i));
        alreadyJoinedNodes.add(rightNode.getId());
        ImmutableList.Builder<JoinNode.EquiJoinClause> criteria = ImmutableList.builder();
        for (JoinGraph.Edge edge : graph.getEdges(rightNode)) {
            PlanNode targetNode = edge.getTargetNode();
            if (alreadyJoinedNodes.contains(targetNode.getId())) {
                criteria.add(new JoinNode.EquiJoinClause(edge.getTargetSymbol(), edge.getSourceSymbol()));
            }
        }
        result = new JoinNode(idAllocator.getNextId(), JoinNode.Type.INNER, result, rightNode, criteria.build(), result.getOutputSymbols(), rightNode.getOutputSymbols(), false, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableMap.of(), Optional.empty());
    }
    List<Expression> filters = graph.getFilters();
    for (Expression filter : filters) {
        result = new FilterNode(idAllocator.getNextId(), result, filter);
    }
    // Some nodes are sensitive to what's produced (e.g., DistinctLimit node)
    return restrictOutputs(idAllocator, result, ImmutableSet.copyOf(expectedOutputSymbols)).orElse(result);
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) JoinNode(io.trino.sql.planner.plan.JoinNode) FilterNode(io.trino.sql.planner.plan.FilterNode) JoinGraph(io.trino.sql.planner.optimizations.joins.JoinGraph) PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) PlanNode(io.trino.sql.planner.plan.PlanNode) Expression(io.trino.sql.tree.Expression) HashSet(java.util.HashSet)
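
buildJoinTree folds the chosen join order into a left-deep tree: the tree built so far is always the left input of the next inner join, and equi-join criteria are attached only against nodes already in the tree. The fold itself can be illustrated with plain strings standing in for plan nodes; the table names below are illustrative only.

import java.util.List;

public class LeftDeepJoinSketch {
    public static void main(String[] args) {
        // A hypothetical join order chosen by the optimizer.
        List<String> joinOrder = List.of("orders", "lineitem", "customer", "nation");

        // Fold left, mirroring the loop in buildJoinTree:
        // the result so far becomes the left input of the next INNER join.
        String result = joinOrder.get(0);
        for (int i = 1; i < joinOrder.size(); i++) {
            result = "(" + result + " INNER JOIN " + joinOrder.get(i) + ")";
        }
        System.out.println(result); // (((orders INNER JOIN lineitem) INNER JOIN customer) INNER JOIN nation)
    }
}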

Example 100 with PlanNodeId

Use of io.trino.sql.planner.plan.PlanNodeId in project trino by trinodb.

From class TestNodeScheduler, method testTopologyAwareScheduling.

@Test(timeOut = 60 * 1000)
public void testTopologyAwareScheduling() {
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    ImmutableList.Builder<InternalNode> nodeBuilder = ImmutableList.builder();
    nodeBuilder.add(new InternalNode("node1", URI.create("http://host1.rack1:11"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new InternalNode("node2", URI.create("http://host2.rack1:12"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new InternalNode("node3", URI.create("http://host3.rack2:13"), NodeVersion.UNKNOWN, false));
    ImmutableList<InternalNode> nodes = nodeBuilder.build();
    nodeManager.addNode(CONNECTOR_ID, nodes);
    // contents of taskMap indicate the node-task map for the current stage
    Map<InternalNode, RemoteTask> taskMap = new HashMap<>();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig().setMaxSplitsPerNode(25).setIncludeCoordinator(false).setMaxPendingSplitsPerTask(20);
    TestNetworkTopology topology = new TestNetworkTopology();
    NodeSelectorFactory nodeSelectorFactory = new TopologyAwareNodeSelectorFactory(topology, nodeManager, nodeSchedulerConfig, nodeTaskMap, getNetworkTopologyConfig());
    NodeScheduler nodeScheduler = new NodeScheduler(nodeSelectorFactory);
    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, Optional.of(CONNECTOR_ID));
    // Fill up the nodes with non-local data
    ImmutableSet.Builder<Split> nonRackLocalBuilder = ImmutableSet.builder();
    for (int i = 0; i < (25 + 11) * 3; i++) {
        nonRackLocalBuilder.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("data.other_rack", 1)), Lifespan.taskWide()));
    }
    Set<Split> nonRackLocalSplits = nonRackLocalBuilder.build();
    Multimap<InternalNode, Split> assignments = nodeSelector.computeAssignments(nonRackLocalSplits, ImmutableList.copyOf(taskMap.values())).getAssignments();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor);
    int task = 0;
    for (InternalNode node : assignments.keySet()) {
        TaskId taskId = new TaskId(new StageId("test", 1), task, 0);
        task++;
        MockRemoteTaskFactory.MockRemoteTask remoteTask = remoteTaskFactory.createTableScanTask(taskId, node, ImmutableList.copyOf(assignments.get(node)), nodeTaskMap.createPartitionedSplitCountTracker(node, taskId));
        remoteTask.startSplits(25);
        nodeTaskMap.addTask(node, remoteTask);
        taskMap.put(node, remoteTask);
    }
    // Continue assigning to fill up part of the queue
    nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values()));
    assignments = nodeSelector.computeAssignments(nonRackLocalSplits, ImmutableList.copyOf(taskMap.values())).getAssignments();
    for (InternalNode node : assignments.keySet()) {
        RemoteTask remoteTask = taskMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder().putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
    }
    nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values()));
    // Check that 3 of the splits were rejected, since they're non-local
    assertEquals(nonRackLocalSplits.size(), 3);
    // Assign rack-local splits
    ImmutableSet.Builder<Split> rackLocalSplits = ImmutableSet.builder();
    HostAddress dataHost1 = HostAddress.fromParts("data.rack1", 1);
    HostAddress dataHost2 = HostAddress.fromParts("data.rack2", 1);
    for (int i = 0; i < 6 * 2; i++) {
        rackLocalSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(dataHost1), Lifespan.taskWide()));
    }
    for (int i = 0; i < 6; i++) {
        rackLocalSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(dataHost2), Lifespan.taskWide()));
    }
    assignments = nodeSelector.computeAssignments(rackLocalSplits.build(), ImmutableList.copyOf(taskMap.values())).getAssignments();
    for (InternalNode node : assignments.keySet()) {
        RemoteTask remoteTask = taskMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder().putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
    }
    Set<Split> unassigned = Sets.difference(rackLocalSplits.build(), new HashSet<>(assignments.values()));
    // Compute the assignments a second time to account for the fact that some splits may not have been assigned due to asynchronous
    // loading of the NetworkLocationCache
    assignments = nodeSelector.computeAssignments(unassigned, ImmutableList.copyOf(taskMap.values())).getAssignments();
    for (InternalNode node : assignments.keySet()) {
        RemoteTask remoteTask = taskMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder().putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
    }
    unassigned = Sets.difference(unassigned, new HashSet<>(assignments.values()));
    assertEquals(unassigned.size(), 3);
    int rack1 = 0;
    int rack2 = 0;
    for (Split split : unassigned) {
        String rack = topology.locate(split.getAddresses().get(0)).getSegments().get(0);
        switch(rack) {
            case "rack1":
                rack1++;
                break;
            case "rack2":
                rack2++;
                break;
            default:
                throw new AssertionError("Unexpected rack: " + rack);
        }
    }
    assertEquals(rack1, 2);
    assertEquals(rack2, 1);
    // Assign local splits
    ImmutableSet.Builder<Split> localSplits = ImmutableSet.builder();
    localSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("host1.rack1", 1)), Lifespan.taskWide()));
    localSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("host2.rack1", 1)), Lifespan.taskWide()));
    localSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(HostAddress.fromParts("host3.rack2", 1)), Lifespan.taskWide()));
    assignments = nodeSelector.computeAssignments(localSplits.build(), ImmutableList.copyOf(taskMap.values())).getAssignments();
    assertEquals(assignments.size(), 3);
    assertEquals(assignments.keySet().size(), 3);
}
Also used : HashMap(java.util.HashMap) ImmutableList(com.google.common.collect.ImmutableList) TopologyAwareNodeSelectorFactory(io.trino.execution.scheduler.TopologyAwareNodeSelectorFactory) NodeSelectorFactory(io.trino.execution.scheduler.NodeSelectorFactory) UniformNodeSelectorFactory(io.trino.execution.scheduler.UniformNodeSelectorFactory) NodeSchedulerConfig(io.trino.execution.scheduler.NodeSchedulerConfig) HostAddress(io.trino.spi.HostAddress) PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) ImmutableSet(com.google.common.collect.ImmutableSet) NodeScheduler(io.trino.execution.scheduler.NodeScheduler) HashSet(java.util.HashSet) LinkedHashSet(java.util.LinkedHashSet) InMemoryNodeManager(io.trino.metadata.InMemoryNodeManager) InternalNode(io.trino.metadata.InternalNode) NodeSelector(io.trino.execution.scheduler.NodeSelector) UniformNodeSelector(io.trino.execution.scheduler.UniformNodeSelector) Split(io.trino.metadata.Split) ConnectorSplit(io.trino.spi.connector.ConnectorSplit) Test(org.testng.annotations.Test)
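
The test relies on hostnames encoding their rack ("host1.rack1", "data.rack2", ...), with topology.locate(...) splitting an address into segments whose first element is the rack. A minimal sketch of that convention follows; the parsing here is an assumption about what TestNetworkTopology implies, not its actual code.

public class RackLocatorSketch {
    // Hypothetical rack extraction implied by the hostnames used in the test.
    static String rackOf(String hostname) {
        int dot = hostname.indexOf('.');
        return dot < 0 ? "unknown" : hostname.substring(dot + 1);
    }

    public static void main(String[] args) {
        System.out.println(rackOf("host1.rack1"));     // rack1
        System.out.println(rackOf("data.rack2"));      // rack2
        System.out.println(rackOf("data.other_rack")); // other_rack, so neither rack1- nor rack2-local
    }
}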

Aggregations

PlanNodeId (io.trino.sql.planner.plan.PlanNodeId): 206
Test (org.testng.annotations.Test): 140
Page (io.trino.spi.Page): 86
Type (io.trino.spi.type.Type): 65
MaterializedResult (io.trino.testing.MaterializedResult): 53
RowPagesBuilder (io.trino.RowPagesBuilder): 43
Symbol (io.trino.sql.planner.Symbol): 43
ImmutableList (com.google.common.collect.ImmutableList): 42
ImmutableMap (com.google.common.collect.ImmutableMap): 36
Optional (java.util.Optional): 35
OperatorFactory (io.trino.operator.OperatorFactory): 34
JoinNode (io.trino.sql.planner.plan.JoinNode): 28
TableScanNode (io.trino.sql.planner.plan.TableScanNode): 28
SymbolStatsEstimate (io.trino.cost.SymbolStatsEstimate): 24
VarcharType.createUnboundedVarcharType (io.trino.spi.type.VarcharType.createUnboundedVarcharType): 24
PlanNodeStatsEstimate (io.trino.cost.PlanNodeStatsEstimate): 23
AfterClass (org.testng.annotations.AfterClass): 23
Split (io.trino.metadata.Split): 22
BIGINT (io.trino.spi.type.BigintType.BIGINT): 22
JoinDistributionType (io.trino.sql.planner.OptimizerConfig.JoinDistributionType): 22