
Example 21 with FilterNode

use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.

the class RemoveRedundantPredicateAboveTableScan method apply.

@Override
public Result apply(FilterNode filterNode, Captures captures, Context context) {
    Session session = context.getSession();
    TableScanNode node = captures.get(TABLE_SCAN);
    Expression predicate = filterNode.getPredicate();
    Expression deterministicPredicate = filterDeterministicConjuncts(plannerContext.getMetadata(), predicate);
    Expression nonDeterministicPredicate = filterNonDeterministicConjuncts(plannerContext.getMetadata(), predicate);
    ExtractionResult decomposedPredicate = getFullyExtractedPredicates(session, deterministicPredicate, context.getSymbolAllocator().getTypes());
    if (decomposedPredicate.getTupleDomain().isAll()) {
        // no conjunct could be fully converted to tuple domain
        return Result.empty();
    }
    TupleDomain<ColumnHandle> predicateDomain = decomposedPredicate.getTupleDomain().transformKeys(node.getAssignments()::get);
    if (predicateDomain.isNone()) {
        // predicate is unsatisfiable (TupleDomain is NONE), so turn the subtree into an empty ValuesNode
        return Result.ofPlanNode(new ValuesNode(node.getId(), node.getOutputSymbols(), ImmutableList.of()));
    }
    if (node.getEnforcedConstraint().isNone()) {
        // table scans with none domain should be converted to ValuesNode
        return Result.ofPlanNode(new ValuesNode(node.getId(), node.getOutputSymbols(), ImmutableList.of()));
    }
    // enforced constraint is not NONE (checked above), so its column domains are present
    Map<ColumnHandle, Domain> enforcedColumnDomains = node.getEnforcedConstraint().getDomains().orElseThrow();
    TupleDomain<ColumnHandle> unenforcedDomain = predicateDomain.transformDomains((columnHandle, predicateColumnDomain) -> {
        Type type = predicateColumnDomain.getType();
        Domain enforcedColumnDomain = Optional.ofNullable(enforcedColumnDomains.get(columnHandle)).orElseGet(() -> Domain.all(type));
        if (predicateColumnDomain.contains(enforcedColumnDomain)) {
            // the predicate on this column is fully enforced by the table scan, so nothing remains to filter
            return Domain.all(type);
        }
        return predicateColumnDomain.intersect(enforcedColumnDomain);
    });
    if (unenforcedDomain.equals(predicateDomain)) {
        // no change in filter predicate
        return Result.empty();
    }
    Map<ColumnHandle, Symbol> assignments = ImmutableBiMap.copyOf(node.getAssignments()).inverse();
    // Dynamic filters are included in decomposedPredicate.getRemainingExpression()
    Expression resultingPredicate = createResultingPredicate(
            plannerContext,
            session,
            context.getSymbolAllocator(),
            typeAnalyzer,
            TRUE_LITERAL,
            new DomainTranslator(plannerContext).toPredicate(session, unenforcedDomain.transformKeys(assignments::get)),
            nonDeterministicPredicate,
            decomposedPredicate.getRemainingExpression());
    if (!TRUE_LITERAL.equals(resultingPredicate)) {
        return Result.ofPlanNode(new FilterNode(context.getIdAllocator().getNextId(), node, resultingPredicate));
    }
    return Result.ofPlanNode(node);
}
Also used : ColumnHandle(io.trino.spi.connector.ColumnHandle) ValuesNode(io.trino.sql.planner.plan.ValuesNode) Symbol(io.trino.sql.planner.Symbol) FilterNode(io.trino.sql.planner.plan.FilterNode) Type(io.trino.spi.type.Type) TableScanNode(io.trino.sql.planner.plan.TableScanNode) Expression(io.trino.sql.tree.Expression) DomainTranslator(io.trino.sql.planner.DomainTranslator) ExtractionResult(io.trino.sql.planner.DomainTranslator.ExtractionResult) Domain(io.trino.spi.predicate.Domain) TupleDomain(io.trino.spi.predicate.TupleDomain) Session(io.trino.Session)
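
The core of this rule is the per-column subsumption check in the transformDomains lambda: a conjunct can be dropped when the domain the connector already enforces is contained in the predicate's domain. A minimal, self-contained sketch of that check follows; it is illustrative only (the BIGINT column and the bounds 100 and 10 are invented), but it uses the same io.trino.spi.predicate API that the rule relies on:

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.ValueSet;

import static io.trino.spi.type.BigintType.BIGINT;

public class DomainSubsumptionSketch {
    public static void main(String[] args) {
        // predicate pushed down from the FilterNode: col < 100
        Domain predicateColumnDomain = Domain.create(ValueSet.ofRanges(Range.lessThan(BIGINT, 100L)), false);
        // constraint already enforced by the TableScanNode: col < 10
        Domain enforcedColumnDomain = Domain.create(ValueSet.ofRanges(Range.lessThan(BIGINT, 10L)), false);
        if (predicateColumnDomain.contains(enforcedColumnDomain)) {
            // the scan already guarantees the predicate, so the rule replaces it with Domain.all(type)
            System.out.println("conjunct is redundant: " + Domain.all(BIGINT));
        }
        // otherwise only the part not already enforced remains
        System.out.println("remaining: " + predicateColumnDomain.intersect(enforcedColumnDomain));
    }
}

If contains(...) returns false, only the intersection has to stay in the rewritten FilterNode, which is exactly what the transformDomains lambda above computes.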

Example 22 with FilterNode

use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.

the class TestSourcePartitionedScheduler method createFragment.

private static PlanFragment createFragment() {
    Symbol symbol = new Symbol("column");
    Symbol buildSymbol = new Symbol("buildColumn");
    // table scan with splitCount splits
    TableScanNode tableScan = TableScanNode.newInstance(TABLE_SCAN_NODE_ID, TEST_TABLE_HANDLE, ImmutableList.of(symbol), ImmutableMap.of(symbol, new TestingColumnHandle("column")), false, Optional.empty());
    FilterNode filterNode = new FilterNode(new PlanNodeId("filter_node_id"), tableScan, createDynamicFilterExpression(TEST_SESSION, createTestMetadataManager(), DYNAMIC_FILTER_ID, VARCHAR, symbol.toSymbolReference()));
    RemoteSourceNode remote = new RemoteSourceNode(new PlanNodeId("remote_id"), new PlanFragmentId("plan_fragment_id"), ImmutableList.of(buildSymbol), Optional.empty(), REPLICATE, RetryPolicy.NONE);
    return new PlanFragment(
            new PlanFragmentId("plan_id"),
            new JoinNode(new PlanNodeId("join_id"), INNER, filterNode, remote, ImmutableList.of(), tableScan.getOutputSymbols(), remote.getOutputSymbols(), false, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableMap.of(DYNAMIC_FILTER_ID, buildSymbol), Optional.empty()),
            ImmutableMap.of(symbol, VARCHAR),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(TABLE_SCAN_NODE_ID),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(symbol)),
            ungroupedExecution(),
            StatsAndCosts.empty(),
            Optional.empty());
}
Also used : PlanNodeId(io.trino.sql.planner.plan.PlanNodeId) TestingColumnHandle(io.trino.testing.TestingMetadata.TestingColumnHandle) RemoteSourceNode(io.trino.sql.planner.plan.RemoteSourceNode) TableScanNode(io.trino.sql.planner.plan.TableScanNode) Symbol(io.trino.sql.planner.Symbol) JoinNode(io.trino.sql.planner.plan.JoinNode) PartitioningScheme(io.trino.sql.planner.PartitioningScheme) FilterNode(io.trino.sql.planner.plan.FilterNode) PlanFragmentId(io.trino.sql.planner.plan.PlanFragmentId) PlanFragment(io.trino.sql.planner.PlanFragment)
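
For readers unfamiliar with the constructors used above, here is a minimal, hedged sketch of the same FilterNode construction pattern. It is illustrative only: the ids, the symbol name, and the empty ValuesNode source are invented, and it assumes the imports listed above plus io.trino.sql.planner.plan.ValuesNode, com.google.common.collect.ImmutableList, and the static io.trino.sql.tree.BooleanLiteral.TRUE_LITERAL.

private static FilterNode trivialFilter() {
    Symbol column = new Symbol("column");
    // an empty ValuesNode stands in for any source plan node
    ValuesNode source = new ValuesNode(new PlanNodeId("values_id"), ImmutableList.of(column), ImmutableList.of());
    // a FilterNode needs only a plan node id, a source node, and a predicate Expression
    return new FilterNode(new PlanNodeId("filter_id"), source, TRUE_LITERAL);
}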

Example 23 with FilterNode

use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.

the class TestPostgreSqlConnectorTest method testStringJoinPushdownWithCollate.

@Test
public void testStringJoinPushdownWithCollate() {
    PlanMatchPattern joinOverTableScans = node(JoinNode.class, anyTree(node(TableScanNode.class)), anyTree(node(TableScanNode.class)));
    PlanMatchPattern broadcastJoinOverTableScans = node(JoinNode.class, node(TableScanNode.class), exchange(ExchangeNode.Scope.LOCAL, exchange(ExchangeNode.Scope.REMOTE, ExchangeNode.Type.REPLICATE, node(TableScanNode.class))));
    Session sessionWithCollatePushdown = Session.builder(getSession()).setCatalogSessionProperty("postgresql", "enable_string_pushdown_with_collate", "true").build();
    Session session = joinPushdownEnabled(sessionWithCollatePushdown);
    // Disable dynamic filtering (DF) here for the sake of the negative test cases' expected plans. With DF enabled, some plans include DF's FilterNode and some do not.
    Session withoutDynamicFiltering = Session.builder(getSession()).setSystemProperty("enable_dynamic_filtering", "false").setCatalogSessionProperty("postgresql", "enable_string_pushdown_with_collate", "true").build();
    String notDistinctOperator = "IS NOT DISTINCT FROM";
    List<String> nonEqualities = Stream.concat(Stream.of(JoinCondition.Operator.values()).filter(operator -> operator != JoinCondition.Operator.EQUAL).map(JoinCondition.Operator::getValue), Stream.of(notDistinctOperator)).collect(toImmutableList());
    // If a connector supports Join pushdown, but does not allow CTAS, we need to make the table creation here overridable.
    try (TestTable nationLowercaseTable = new TestTable(getQueryRunner()::execute, "nation_lowercase", "AS SELECT nationkey, lower(name) name, regionkey FROM nation")) {
        // basic case
        assertThat(query(session, "SELECT r.name, n.name FROM nation n JOIN region r ON n.regionkey = r.regionkey")).isFullyPushedDown();
        // join over different columns
        assertThat(query(session, "SELECT r.name, n.name FROM nation n JOIN region r ON n.nationkey = r.regionkey")).isFullyPushedDown();
        // pushdown when using USING
        assertThat(query(session, "SELECT r.name, n.name FROM nation n JOIN region r USING(regionkey)")).isFullyPushedDown();
        // varchar equality predicate
        assertConditionallyPushedDown(session, "SELECT n.name, n2.regionkey FROM nation n JOIN nation n2 ON n.name = n2.name", true, joinOverTableScans);
        assertConditionallyPushedDown(session, format("SELECT n.name, nl.regionkey FROM nation n JOIN %s nl ON n.name = nl.name", nationLowercaseTable.getName()), true, joinOverTableScans);
        // multiple bigint predicates
        assertThat(query(session, "SELECT n.name, c.name FROM nation n JOIN customer c ON n.nationkey = c.nationkey and n.regionkey = c.custkey")).isFullyPushedDown();
        // inequality
        for (String operator : nonEqualities) {
            // bigint inequality predicate
            assertThat(query(withoutDynamicFiltering, format("SELECT r.name, n.name FROM nation n JOIN region r ON n.regionkey %s r.regionkey", operator))).isNotFullyPushedDown(broadcastJoinOverTableScans);
            // varchar inequality predicate
            assertThat(query(withoutDynamicFiltering, format("SELECT n.name, nl.name FROM nation n JOIN %s nl ON n.name %s nl.name", nationLowercaseTable.getName(), operator))).isNotFullyPushedDown(broadcastJoinOverTableScans);
        }
        // inequality along with an equality, which constitutes an equi-condition and allows filter to remain as part of the Join
        for (String operator : nonEqualities) {
            assertConditionallyPushedDown(session, format("SELECT n.name, c.name FROM nation n JOIN customer c ON n.nationkey = c.nationkey AND n.regionkey %s c.custkey", operator), expectJoinPushdown(operator), joinOverTableScans);
        }
        // varchar inequality along with an equality, which constitutes an equi-condition and allows filter to remain as part of the Join
        for (String operator : nonEqualities) {
            assertConditionallyPushedDown(session, format("SELECT n.name, nl.name FROM nation n JOIN %s nl ON n.regionkey = nl.regionkey AND n.name %s nl.name", nationLowercaseTable.getName(), operator), expectJoinPushdown(operator), joinOverTableScans);
        }
        // LEFT JOIN
        assertThat(query(session, "SELECT r.name, n.name FROM nation n LEFT JOIN region r ON n.nationkey = r.regionkey")).isFullyPushedDown();
        assertThat(query(session, "SELECT r.name, n.name FROM region r LEFT JOIN nation n ON n.nationkey = r.regionkey")).isFullyPushedDown();
        // RIGHT JOIN
        assertThat(query(session, "SELECT r.name, n.name FROM nation n RIGHT JOIN region r ON n.nationkey = r.regionkey")).isFullyPushedDown();
        assertThat(query(session, "SELECT r.name, n.name FROM region r RIGHT JOIN nation n ON n.nationkey = r.regionkey")).isFullyPushedDown();
        // FULL JOIN
        assertConditionallyPushedDown(session, "SELECT r.name, n.name FROM nation n FULL JOIN region r ON n.nationkey = r.regionkey", true, joinOverTableScans);
        // Join over a (double) predicate
        assertThat(query(session, "" + "SELECT c.name, n.name " + "FROM (SELECT * FROM customer WHERE acctbal > 8000) c " + "JOIN nation n ON c.custkey = n.nationkey")).isFullyPushedDown();
        // Join over a varchar equality predicate
        assertConditionallyPushedDown(session, "SELECT c.name, n.name FROM (SELECT * FROM customer WHERE address = 'TcGe5gaZNgVePxU5kRrvXBfkasDTea') c " + "JOIN nation n ON c.custkey = n.nationkey", hasBehavior(SUPPORTS_PREDICATE_PUSHDOWN_WITH_VARCHAR_EQUALITY), joinOverTableScans);
        // join over aggregation
        assertConditionallyPushedDown(session, "SELECT * FROM (SELECT regionkey rk, count(nationkey) c FROM nation GROUP BY regionkey) n " + "JOIN region r ON n.rk = r.regionkey", hasBehavior(SUPPORTS_AGGREGATION_PUSHDOWN), joinOverTableScans);
        // join over LIMIT
        assertConditionallyPushedDown(session, "SELECT * FROM (SELECT nationkey FROM nation LIMIT 30) n " + "JOIN region r ON n.nationkey = r.regionkey", hasBehavior(SUPPORTS_LIMIT_PUSHDOWN), joinOverTableScans);
        // join over TopN
        assertConditionallyPushedDown(session, "SELECT * FROM (SELECT nationkey FROM nation ORDER BY regionkey LIMIT 5) n " + "JOIN region r ON n.nationkey = r.regionkey", hasBehavior(SUPPORTS_TOPN_PUSHDOWN), joinOverTableScans);
        // join over join
        assertThat(query(session, "SELECT * FROM nation n, region r, customer c WHERE n.regionkey = r.regionkey AND r.regionkey = c.custkey")).isFullyPushedDown();
    }
}
Also used : Connection(java.sql.Connection) IntStream.range(java.util.stream.IntStream.range) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) PlanMatchPattern(io.trino.sql.planner.assertions.PlanMatchPattern) Test(org.testng.annotations.Test) TestTable(io.trino.testing.sql.TestTable) MoreCollectors.onlyElement(com.google.common.collect.MoreCollectors.onlyElement) FilterNode(io.trino.sql.planner.plan.FilterNode) Duration(io.airlift.units.Duration) PlanMatchPattern.exchange(io.trino.sql.planner.assertions.PlanMatchPattern.exchange) Math.round(java.lang.Math.round) Map(java.util.Map) TestingConnectorBehavior(io.trino.testing.TestingConnectorBehavior) SqlExecutor(io.trino.testing.sql.SqlExecutor) Slices.utf8Slice(io.airlift.slice.Slices.utf8Slice) JoinNode(io.trino.sql.planner.plan.JoinNode) Assert.assertFalse(org.testng.Assert.assertFalse) TableScanNode(io.trino.sql.planner.plan.TableScanNode) TestTable.randomTableSuffix(io.trino.testing.sql.TestTable.randomTableSuffix) ImmutableMap(com.google.common.collect.ImmutableMap) Range(io.trino.spi.predicate.Range) TestView(io.trino.testing.sql.TestView) BeforeClass(org.testng.annotations.BeforeClass) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) PostgreSqlQueryRunner.createPostgreSqlQueryRunner(io.trino.plugin.postgresql.PostgreSqlQueryRunner.createPostgreSqlQueryRunner) UUID(java.util.UUID) String.format(java.lang.String.format) Collectors.joining(java.util.stream.Collectors.joining) List(java.util.List) Stream(java.util.stream.Stream) PlanMatchPattern.anyTree(io.trino.sql.planner.assertions.PlanMatchPattern.anyTree) SUPPORTS_AGGREGATION_PUSHDOWN(io.trino.testing.TestingConnectorBehavior.SUPPORTS_AGGREGATION_PUSHDOWN) ExchangeNode(io.trino.sql.planner.plan.ExchangeNode) JdbcTableHandle(io.trino.plugin.jdbc.JdbcTableHandle) Session(io.trino.Session) JoinCondition(io.trino.spi.connector.JoinCondition) Assert.assertEquals(org.testng.Assert.assertEquals) SUPPORTS_TOPN_PUSHDOWN(io.trino.testing.TestingConnectorBehavior.SUPPORTS_TOPN_PUSHDOWN) JdbcSqlExecutor(io.trino.testing.sql.JdbcSqlExecutor) SQLException(java.sql.SQLException) ImmutableList(com.google.common.collect.ImmutableList) Verify.verify(com.google.common.base.Verify.verify) SUPPORTS_LIMIT_PUSHDOWN(io.trino.testing.TestingConnectorBehavior.SUPPORTS_LIMIT_PUSHDOWN) ColumnHandle(io.trino.spi.connector.ColumnHandle) SUPPORTS_PREDICATE_PUSHDOWN_WITH_VARCHAR_EQUALITY(io.trino.testing.TestingConnectorBehavior.SUPPORTS_PREDICATE_PUSHDOWN_WITH_VARCHAR_EQUALITY) JdbcColumnHandle(io.trino.plugin.jdbc.JdbcColumnHandle) RemoteDatabaseEvent(io.trino.plugin.jdbc.RemoteDatabaseEvent) TopNNode(io.trino.sql.planner.plan.TopNNode) TupleDomain(io.trino.spi.predicate.TupleDomain) BaseJdbcConnectorTest(io.trino.plugin.jdbc.BaseJdbcConnectorTest) PlanMatchPattern.node(io.trino.sql.planner.assertions.PlanMatchPattern.node) QueryRunner(io.trino.testing.QueryRunner) Statement(java.sql.Statement) Assert.assertTrue(org.testng.Assert.assertTrue) VarcharType.createVarcharType(io.trino.spi.type.VarcharType.createVarcharType) DriverManager(java.sql.DriverManager) PlanMatchPattern.tableScan(io.trino.sql.planner.assertions.PlanMatchPattern.tableScan) SECONDS(java.util.concurrent.TimeUnit.SECONDS)

Example 24 with FilterNode

use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.

the class ExtractDereferencesFromFilterAboveScan method apply.

@Override
public Result apply(FilterNode node, Captures captures, Context context) {
    Set<SubscriptExpression> dereferences = extractRowSubscripts(ImmutableList.of(node.getPredicate()), true, context.getSession(), typeAnalyzer, context.getSymbolAllocator().getTypes());
    if (dereferences.isEmpty()) {
        return Result.empty();
    }
    Assignments assignments = Assignments.of(dereferences, context.getSession(), context.getSymbolAllocator(), typeAnalyzer);
    Map<Expression, SymbolReference> mappings = HashBiMap.create(assignments.getMap()).inverse().entrySet().stream().collect(toImmutableMap(Map.Entry::getKey, entry -> entry.getValue().toSymbolReference()));
    PlanNode source = node.getSource();
    return Result.ofPlanNode(new ProjectNode(
            context.getIdAllocator().getNextId(),
            new FilterNode(
                    context.getIdAllocator().getNextId(),
                    new ProjectNode(context.getIdAllocator().getNextId(), source, Assignments.builder().putIdentities(source.getOutputSymbols()).putAll(assignments).build()),
                    replaceExpression(node.getPredicate(), mappings)),
            Assignments.identity(node.getOutputSymbols())));
}
Also used : Patterns.filter(io.trino.sql.planner.plan.Patterns.filter) Capture.newCapture(io.trino.matching.Capture.newCapture) FilterNode(io.trino.sql.planner.plan.FilterNode) PlanNode(io.trino.sql.planner.plan.PlanNode) SubscriptExpression(io.trino.sql.tree.SubscriptExpression) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map) Objects.requireNonNull(java.util.Objects.requireNonNull) Rule(io.trino.sql.planner.iterative.Rule) ProjectNode(io.trino.sql.planner.plan.ProjectNode) TableScanNode(io.trino.sql.planner.plan.TableScanNode) Assignments(io.trino.sql.planner.plan.Assignments) Set(java.util.Set) Patterns.tableScan(io.trino.sql.planner.plan.Patterns.tableScan) Capture(io.trino.matching.Capture) DereferencePushdown.extractRowSubscripts(io.trino.sql.planner.iterative.rule.DereferencePushdown.extractRowSubscripts) HashBiMap(com.google.common.collect.HashBiMap) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) Pattern(io.trino.matching.Pattern) TypeAnalyzer(io.trino.sql.planner.TypeAnalyzer) Patterns.source(io.trino.sql.planner.plan.Patterns.source) SymbolReference(io.trino.sql.tree.SymbolReference) Captures(io.trino.matching.Captures) ExpressionNodeInliner.replaceExpression(io.trino.sql.planner.ExpressionNodeInliner.replaceExpression) Expression(io.trino.sql.tree.Expression)
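
The heart of the rule is the replaceExpression step: every extracted dereference is swapped for the symbol that the new ProjectNode computes it into. A hedged, illustrative fragment of just that step follows (the names msg and expr_0, the subscript index, and the literal 42 are invented; it assumes the imports listed above plus io.trino.sql.tree.ComparisonExpression, io.trino.sql.tree.LongLiteral, io.trino.sql.planner.Symbol, and com.google.common.collect.ImmutableMap):

// predicate before the rewrite: msg[1] = 42 (a row-field dereference represented as a SubscriptExpression)
Expression dereference = new SubscriptExpression(new SymbolReference("msg"), new LongLiteral("1"));
Expression predicate = new ComparisonExpression(ComparisonExpression.Operator.EQUAL, dereference, new LongLiteral("42"));
// the new ProjectNode assigns the dereference to a fresh symbol, here expr_0
SymbolReference replacement = new Symbol("expr_0").toSymbolReference();
// predicate after the rewrite: expr_0 = 42
Expression rewritten = replaceExpression(predicate, ImmutableMap.of(dereference, replacement));

The rewritten predicate then sits in a FilterNode above the projection that actually evaluates the dereference, as the return statement above shows.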

Example 25 with FilterNode

use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.

the class ImplementExceptAll method apply.

@Override
public Result apply(ExceptNode node, Captures captures, Context context) {
    SetOperationNodeTranslator translator = new SetOperationNodeTranslator(context.getSession(), metadata, context.getSymbolAllocator(), context.getIdAllocator());
    SetOperationNodeTranslator.TranslationResult result = translator.makeSetContainmentPlanForAll(node);
    // compute expected multiplicity for every row
    checkState(result.getCountSymbols().size() > 0, "ExceptNode translation result has no count symbols");
    ResolvedFunction greatest = metadata.resolveFunction(context.getSession(), QualifiedName.of("greatest"), fromTypes(BIGINT, BIGINT));
    Expression count = result.getCountSymbols().get(0).toSymbolReference();
    for (int i = 1; i < result.getCountSymbols().size(); i++) {
        count = new FunctionCall(greatest.toQualifiedName(), ImmutableList.of(new ArithmeticBinaryExpression(SUBTRACT, count, result.getCountSymbols().get(i).toSymbolReference()), new GenericLiteral("BIGINT", "0")));
    }
    // filter rows so that expected number of rows remains
    Expression removeExtraRows = new ComparisonExpression(LESS_THAN_OR_EQUAL, result.getRowNumberSymbol().toSymbolReference(), count);
    FilterNode filter = new FilterNode(context.getIdAllocator().getNextId(), result.getPlanNode(), removeExtraRows);
    // prune helper symbols
    ProjectNode project = new ProjectNode(context.getIdAllocator().getNextId(), filter, Assignments.identity(node.getOutputSymbols()));
    return Result.ofPlanNode(project);
}
Also used : ArithmeticBinaryExpression(io.trino.sql.tree.ArithmeticBinaryExpression) ComparisonExpression(io.trino.sql.tree.ComparisonExpression) Expression(io.trino.sql.tree.Expression) ResolvedFunction(io.trino.metadata.ResolvedFunction) FilterNode(io.trino.sql.planner.plan.FilterNode) ProjectNode(io.trino.sql.planner.plan.ProjectNode) FunctionCall(io.trino.sql.tree.FunctionCall) GenericLiteral(io.trino.sql.tree.GenericLiteral)
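
As a worked illustration of the multiplicity rule encoded above (not code from the Trino codebase): for two inputs, EXCEPT ALL keeps max(count_1 - count_2, 0) copies of each row, which the generated FilterNode expresses as row_number <= greatest(count_1 - count_2, BIGINT '0').

// A row appearing 3 times on the left of EXCEPT ALL and once on the right survives twice.
long leftCount = 3;
long rightCount = 1;
long expectedMultiplicity = Math.max(leftCount - rightCount, 0); // greatest(count_1 - count_2, 0) = 2
// The FilterNode built above keeps exactly the copies whose row_number is <= expectedMultiplicity.
boolean keepSecondCopy = 2 <= expectedMultiplicity; // true
boolean keepThirdCopy = 3 <= expectedMultiplicity; // false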

Aggregations

FilterNode (io.trino.sql.planner.plan.FilterNode): 46
Expression (io.trino.sql.tree.Expression): 33
PlanNode (io.trino.sql.planner.plan.PlanNode): 20
ProjectNode (io.trino.sql.planner.plan.ProjectNode): 20
Symbol (io.trino.sql.planner.Symbol): 19
TableScanNode (io.trino.sql.planner.plan.TableScanNode): 19
ComparisonExpression (io.trino.sql.tree.ComparisonExpression): 19
ImmutableList (com.google.common.collect.ImmutableList): 16
JoinNode (io.trino.sql.planner.plan.JoinNode): 14
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 13
Session (io.trino.Session): 11
ColumnHandle (io.trino.spi.connector.ColumnHandle): 11
Test (org.testng.annotations.Test): 10
SemiJoinNode (io.trino.sql.planner.plan.SemiJoinNode): 9
NotExpression (io.trino.sql.tree.NotExpression): 9
Map (java.util.Map): 9
ValuesNode (io.trino.sql.planner.plan.ValuesNode): 8
DomainTranslator (io.trino.sql.planner.DomainTranslator): 7
List (java.util.List): 7
InListExpression (io.trino.sql.tree.InListExpression): 6