Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class TestLocalExchange, method testPartitionCustomPartitioning.
@Test(dataProvider = "executionStrategy")
public void testPartitionCustomPartitioning(PipelineExecutionStrategy executionStrategy)
{
    ConnectorPartitioningHandle connectorPartitioningHandle = new ConnectorPartitioningHandle() {};
    ConnectorNodePartitioningProvider connectorNodePartitioningProvider = new ConnectorNodePartitioningProvider()
    {
        @Override
        public ConnectorBucketNodeMap getBucketNodeMap(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
        {
            return createBucketNodeMap(2);
        }

        @Override
        public ToIntFunction<ConnectorSplit> getSplitBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public BucketFunction getBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Type> partitionChannelTypes, int bucketCount)
        {
            // custom routing: rows whose partition-channel value is 42 go to bucket 0, everything else to bucket 1
            return (page, position) -> {
                long rowValue = BIGINT.getLong(page.getBlock(0), position);
                if (rowValue == 42) {
                    return 0;
                }
                return 1;
            };
        }
    };

    List<Type> types = ImmutableList.of(VARCHAR, BIGINT);
    nodePartitioningManager.addPartitioningProvider(new CatalogName("foo"), connectorNodePartitioningProvider);
    PartitioningHandle partitioningHandle = new PartitioningHandle(
            Optional.of(new CatalogName("foo")),
            Optional.of(TestingTransactionHandle.create()),
            connectorPartitioningHandle);
    LocalExchangeFactory localExchangeFactory = new LocalExchangeFactory(
            nodePartitioningManager,
            SESSION,
            partitioningHandle,
            2,
            types,
            ImmutableList.of(1),
            Optional.empty(),
            executionStrategy,
            LOCAL_EXCHANGE_MAX_BUFFERED_BYTES,
            TYPE_OPERATOR_FACTORY);
    LocalExchangeSinkFactoryId localExchangeSinkFactoryId = localExchangeFactory.newSinkFactoryId();
    localExchangeFactory.noMoreSinkFactories();

    run(localExchangeFactory, executionStrategy, exchange -> {
        assertEquals(exchange.getBufferCount(), 2);
        assertExchangeTotalBufferedBytes(exchange, 0);

        LocalExchangeSinkFactory sinkFactory = exchange.getSinkFactory(localExchangeSinkFactoryId);
        LocalExchangeSink sink = sinkFactory.createSink();
        assertSinkCanWrite(sink);
        sinkFactory.close();
        sinkFactory.noMoreSinkFactories();

        LocalExchangeSource sourceA = exchange.getSource(1);
        assertSource(sourceA, 0);
        LocalExchangeSource sourceB = exchange.getSource(0);
        assertSource(sourceB, 0);

        // a single row with 42 in the partition channel (column 1): routed to bucket 0, arriving at sourceA
        Page pageA = SequencePageBuilder.createSequencePage(types, 1, 100, 42);
        sink.addPage(pageA);
        assertSource(sourceA, 1);
        assertSource(sourceB, 0);
        assertRemovePage(types, sourceA, pageA);
        assertSource(sourceA, 0);

        // 100 rows with partition-channel values 43..142, none equal to 42: all routed to bucket 1, arriving at sourceB
        Page pageB = SequencePageBuilder.createSequencePage(types, 100, 100, 43);
        sink.addPage(pageB);
        assertSource(sourceA, 0);
        assertSource(sourceB, 1);
        assertRemovePage(types, sourceB, pageB);
        assertSource(sourceB, 0);
    });
}
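The lambda returned from getBucketFunction is where BIGINT earns its import: BIGINT.getLong(block, position) decodes the raw long stored at a position of the partition channel. Below is a self-contained sketch of that read path; the block construction and class name are illustrative, not part of the test.

import io.trino.spi.block.Block;
import io.trino.spi.block.BlockBuilder;

import static io.trino.spi.type.BigintType.BIGINT;

public class BigintReadSketch
{
    public static void main(String[] args)
    {
        // write three BIGINT values into a block
        BlockBuilder builder = BIGINT.createBlockBuilder(null, 3);
        BIGINT.writeLong(builder, 42);
        BIGINT.writeLong(builder, 43);
        BIGINT.writeLong(builder, 44);
        Block block = builder.build();

        // read them back positionally, as the test's BucketFunction does
        for (int position = 0; position < block.getPositionCount(); position++) {
            long value = BIGINT.getLong(block, position);
            int bucket = value == 42 ? 0 : 1; // same routing rule as the test
            System.out.printf("position %d: %d -> bucket %d%n", position, value, bucket);
        }
    }
}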
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class TestConnectorPushdownRulesWithHive, method testPushdownWithDuplicateExpressions.
@Test
public void testPushdownWithDuplicateExpressions()
{
    String tableName = "duplicate_expressions";
    tester().getQueryRunner().execute(format(
            "CREATE TABLE %s (struct_of_bigint, just_bigint) AS SELECT cast(row(5, 6) AS row(a bigint, b bigint)) AS struct_of_int, 5 AS just_bigint WHERE false",
            tableName));

    PushProjectionIntoTableScan pushProjectionIntoTableScan = new PushProjectionIntoTableScan(
            tester().getPlannerContext(),
            tester().getTypeAnalyzer(),
            new ScalarStatsCalculator(tester().getPlannerContext(), tester().getTypeAnalyzer()));

    HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());
    TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false));

    HiveColumnHandle bigintColumn = createBaseColumn("just_bigint", 1, toHiveType(BIGINT), BIGINT, REGULAR, Optional.empty());
    HiveColumnHandle partialColumn = new HiveColumnHandle(
            "struct_of_bigint",
            0,
            toHiveType(ROW_TYPE),
            ROW_TYPE,
            Optional.of(new HiveColumnProjectionInfo(ImmutableList.of(0), ImmutableList.of("a"), toHiveType(BIGINT), BIGINT)),
            REGULAR,
            Optional.empty());

    // Test projection pushdown with duplicate column references
    tester().assertThat(pushProjectionIntoTableScan)
            .on(p -> {
                SymbolReference column = p.symbol("just_bigint", BIGINT).toSymbolReference();
                Expression negation = new ArithmeticUnaryExpression(MINUS, column);
                // the column reference is part of both assignments
                return p.project(
                        Assignments.of(
                                p.symbol("column_ref", BIGINT), column,
                                p.symbol("negated_column_ref", BIGINT), negation),
                        p.tableScan(
                                table,
                                ImmutableList.of(p.symbol("just_bigint", BIGINT)),
                                ImmutableMap.of(p.symbol("just_bigint", BIGINT), bigintColumn)));
            })
            .matches(project(
                    ImmutableMap.of(
                            "column_ref", expression("just_bigint_0"),
                            "negated_column_ref", expression("- just_bigint_0")),
                    tableScan(
                            hiveTable.withProjectedColumns(ImmutableSet.of(bigintColumn))::equals,
                            TupleDomain.all(),
                            ImmutableMap.of("just_bigint_0", bigintColumn::equals))));

    // Test dereference pushdown
    tester().assertThat(pushProjectionIntoTableScan)
            .on(p -> {
                SubscriptExpression subscript = new SubscriptExpression(p.symbol("struct_of_bigint", ROW_TYPE).toSymbolReference(), new LongLiteral("1"));
                Expression sum = new ArithmeticBinaryExpression(ADD, subscript, new LongLiteral("2"));
                // the subscript expression instance is part of both assignments
                return p.project(
                        Assignments.of(
                                p.symbol("expr_deref", BIGINT), subscript,
                                p.symbol("expr_deref_2", BIGINT), sum),
                        p.tableScan(
                                table,
                                ImmutableList.of(p.symbol("struct_of_bigint", ROW_TYPE)),
                                ImmutableMap.of(p.symbol("struct_of_bigint", ROW_TYPE), partialColumn.getBaseColumn())));
            })
            .matches(project(
                    ImmutableMap.of(
                            "expr_deref", expression(new SymbolReference("struct_of_bigint#a")),
                            "expr_deref_2", expression(new ArithmeticBinaryExpression(ADD, new SymbolReference("struct_of_bigint#a"), new LongLiteral("2")))),
                    tableScan(
                            hiveTable.withProjectedColumns(ImmutableSet.of(partialColumn))::equals,
                            TupleDomain.all(),
                            ImmutableMap.of("struct_of_bigint#a", partialColumn::equals))));

    metastore.dropTable(SCHEMA_NAME, tableName, true);
}
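The partialColumn handle is the piece that makes dereference pushdown expressible: HiveColumnProjectionInfo records a path into the base row column together with the projected type. Here is a hedged, annotated restatement of that construction; the comments and variable names are mine, not from the test.

// projecting field "a" (index 0, type BIGINT) out of the row(a bigint, b bigint) column
HiveColumnHandle partial = new HiveColumnHandle(
        "struct_of_bigint",                     // base column name
        0,                                      // base hive column index
        toHiveType(ROW_TYPE),                   // base hive type
        ROW_TYPE,                               // base Trino type
        Optional.of(new HiveColumnProjectionInfo(
                ImmutableList.of(0),            // dereference path by field index...
                ImmutableList.of("a"),          // ...and by field name
                toHiveType(BIGINT),             // projected hive type
                BIGINT)),                       // projected Trino type
        REGULAR,
        Optional.empty());

// getBaseColumn() drops the projection info, yielding the handle for the whole struct
HiveColumnHandle full = partial.getBaseColumn();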
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class TestConnectorPushdownRulesWithHive, method testProjectionPushdown.
@Test
public void testProjectionPushdown()
{
    String tableName = "projection_test";
    PushProjectionIntoTableScan pushProjectionIntoTableScan = new PushProjectionIntoTableScan(
            tester().getPlannerContext(),
            tester().getTypeAnalyzer(),
            new ScalarStatsCalculator(tester().getPlannerContext(), tester().getTypeAnalyzer()));
    tester().getQueryRunner().execute(format(
            "CREATE TABLE %s (struct_of_int) AS SELECT cast(row(5, 6) AS row(a bigint, b bigint)) AS struct_of_int WHERE false",
            tableName));

    Type baseType = ROW_TYPE;
    HiveColumnHandle partialColumn = new HiveColumnHandle(
            "struct_of_int",
            0,
            toHiveType(baseType),
            baseType,
            Optional.of(new HiveColumnProjectionInfo(ImmutableList.of(0), ImmutableList.of("a"), toHiveType(BIGINT), BIGINT)),
            REGULAR,
            Optional.empty());
    HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());
    TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false));
    HiveColumnHandle fullColumn = partialColumn.getBaseColumn();

    // Test projected columns pushdown to HiveTableHandle in case of full column references
    tester().assertThat(pushProjectionIntoTableScan)
            .on(p -> p.project(
                    Assignments.of(p.symbol("struct_of_int", baseType), p.symbol("struct_of_int", baseType).toSymbolReference()),
                    p.tableScan(
                            table,
                            ImmutableList.of(p.symbol("struct_of_int", baseType)),
                            ImmutableMap.of(p.symbol("struct_of_int", baseType), fullColumn))))
            .matches(project(
                    ImmutableMap.of("expr", expression("col")),
                    tableScan(
                            hiveTable.withProjectedColumns(ImmutableSet.of(fullColumn))::equals,
                            TupleDomain.all(),
                            ImmutableMap.of("col", fullColumn::equals))));

    // Rule should return Optional.empty after projected ColumnHandles have been added to HiveTableHandle
    tester().assertThat(pushProjectionIntoTableScan)
            .on(p -> p.project(
                    Assignments.of(p.symbol("struct_of_int", baseType), p.symbol("struct_of_int", baseType).toSymbolReference()),
                    p.tableScan(
                            new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable.withProjectedColumns(ImmutableSet.of(fullColumn)), new HiveTransactionHandle(false)),
                            ImmutableList.of(p.symbol("struct_of_int", baseType)),
                            ImmutableMap.of(p.symbol("struct_of_int", baseType), fullColumn))))
            .doesNotFire();

    // Test dereference pushdown
    tester().assertThat(pushProjectionIntoTableScan)
            .on(p -> p.project(
                    Assignments.of(p.symbol("expr_deref", BIGINT), new SubscriptExpression(p.symbol("struct_of_int", baseType).toSymbolReference(), new LongLiteral("1"))),
                    p.tableScan(
                            table,
                            ImmutableList.of(p.symbol("struct_of_int", baseType)),
                            ImmutableMap.of(p.symbol("struct_of_int", baseType), fullColumn))))
            .matches(project(
                    ImmutableMap.of("expr_deref", expression(new SymbolReference("struct_of_int#a"))),
                    tableScan(
                            hiveTable.withProjectedColumns(ImmutableSet.of(partialColumn))::equals,
                            TupleDomain.all(),
                            ImmutableMap.of("struct_of_int#a", partialColumn::equals))));

    metastore.dropTable(SCHEMA_NAME, tableName, true);
}
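The middle assertion documents the rule's fixpoint behavior: pushdown happens by rewriting the table handle, so a handle that already carries its projected columns gives the rule nothing further to do. A minimal sketch of the contrast, with illustrative variable names:

// full column: the connector is asked for the whole row(a bigint, b bigint) value
HiveTableHandle wholeStruct = hiveTable.withProjectedColumns(ImmutableSet.of(fullColumn));

// partial column: only field "a" of the struct needs to be read from storage
HiveTableHandle fieldOnly = hiveTable.withProjectedColumns(ImmutableSet.of(partialColumn));

// once a handle already records its projected columns, as in the doesNotFire()
// case above, re-running PushProjectionIntoTableScan returns Optional.empty()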
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class QueryPlanner, method planExpand.
public RelationPlan planExpand(Query query)
{
    checkArgument(analysis.isExpandableQuery(query), "query is not registered as expandable");
    Union union = (Union) query.getQueryBody();
    ImmutableList.Builder<NodeAndMappings> recursionSteps = ImmutableList.builder();

    // plan anchor relation
    Relation anchorNode = union.getRelations().get(0);
    RelationPlan anchorPlan = new RelationPlanner(analysis, symbolAllocator, idAllocator, lambdaDeclarationToSymbolMap, plannerContext, outerContext, session, recursiveSubqueries)
            .process(anchorNode, null);

    // prune anchor plan outputs to contain only the symbols exposed in the scope
    NodeAndMappings prunedAnchorPlan = pruneInvisibleFields(anchorPlan, idAllocator);

    // if the anchor plan has duplicate output symbols, add a projection on top to make the symbols unique.
    // This is necessary to successfully unroll the recursion: the recursion step relation must follow
    // the same layout, while it might not have duplicate outputs where the anchor plan did
    NodeAndMappings disambiguatedAnchorPlan = disambiguateOutputs(prunedAnchorPlan, symbolAllocator, idAllocator);
    anchorPlan = new RelationPlan(disambiguatedAnchorPlan.getNode(), analysis.getScope(query), disambiguatedAnchorPlan.getFields(), outerContext);
    recursionSteps.add(copy(anchorPlan.getRoot(), anchorPlan.getFieldMappings()));

    // plan recursion step
    Relation recursionStepRelation = union.getRelations().get(1);
    RelationPlan recursionStepPlan = new RelationPlanner(
            analysis,
            symbolAllocator,
            idAllocator,
            lambdaDeclarationToSymbolMap,
            plannerContext,
            outerContext,
            session,
            ImmutableMap.of(NodeRef.of(analysis.getRecursiveReference(query)), anchorPlan))
            .process(recursionStepRelation, null);

    // coerce recursion step outputs and prune them to contain only the symbols exposed in the scope
    NodeAndMappings coercedRecursionStep;
    List<Type> types = analysis.getRelationCoercion(recursionStepRelation);
    if (types == null) {
        coercedRecursionStep = pruneInvisibleFields(recursionStepPlan, idAllocator);
    }
    else {
        coercedRecursionStep = coerce(recursionStepPlan, types, symbolAllocator, idAllocator);
    }

    NodeAndMappings replacementSpot = new NodeAndMappings(anchorPlan.getRoot(), anchorPlan.getFieldMappings());
    PlanNode recursionStep = coercedRecursionStep.getNode();
    List<Symbol> mappings = coercedRecursionStep.getFields();

    // unroll recursion
    int maxRecursionDepth = getMaxRecursionDepth(session);
    for (int i = 0; i < maxRecursionDepth; i++) {
        recursionSteps.add(copy(recursionStep, mappings));
        NodeAndMappings replacement = copy(recursionStep, mappings);

        // if the recursion step plan has duplicate output symbols, add a projection on top to make the symbols unique.
        // This is necessary to successfully unroll the recursion: the relation on the next recursion step must follow
        // the same layout, while it might not have duplicate outputs where the plan for this step did
        replacement = disambiguateOutputs(replacement, symbolAllocator, idAllocator);
        recursionStep = replace(recursionStep, replacementSpot, replacement);
        replacementSpot = replacement;
    }

    // after the last recursion step, check whether the recursion converged; the last step is expected to return an empty result
    // 1. append a window function to count rows
    NodeAndMappings checkConvergenceStep = copy(recursionStep, mappings);
    Symbol countSymbol = symbolAllocator.newSymbol("count", BIGINT);
    ResolvedFunction function = plannerContext.getMetadata().resolveFunction(session, QualifiedName.of("count"), ImmutableList.of());
    WindowNode.Function countFunction = new WindowNode.Function(function, ImmutableList.of(), DEFAULT_FRAME, false);
    WindowNode windowNode = new WindowNode(
            idAllocator.getNextId(),
            checkConvergenceStep.getNode(),
            new WindowNode.Specification(ImmutableList.of(), Optional.empty()),
            ImmutableMap.of(countSymbol, countFunction),
            Optional.empty(),
            ImmutableSet.of(),
            0);

    // 2. append a filter to fail on a non-empty result: count >= 0 holds for every existing row,
    // so fail() fires as soon as the convergence step produces any row at all
    ResolvedFunction fail = plannerContext.getMetadata().resolveFunction(session, QualifiedName.of("fail"), fromTypes(VARCHAR));
    String recursionLimitExceededMessage = format("Recursion depth limit exceeded (%s). Use 'max_recursion_depth' session property to modify the limit.", maxRecursionDepth);
    Expression predicate = new IfExpression(
            new ComparisonExpression(GREATER_THAN_OR_EQUAL, countSymbol.toSymbolReference(), new GenericLiteral("BIGINT", "0")),
            new Cast(
                    new FunctionCall(fail.toQualifiedName(), ImmutableList.of(new Cast(new StringLiteral(recursionLimitExceededMessage), toSqlType(VARCHAR)))),
                    toSqlType(BOOLEAN)),
            TRUE_LITERAL);
    FilterNode filterNode = new FilterNode(idAllocator.getNextId(), windowNode, predicate);
    recursionSteps.add(new NodeAndMappings(filterNode, checkConvergenceStep.getFields()));

    // union all the recursion steps
    List<NodeAndMappings> recursionStepsToUnion = recursionSteps.build();
    List<Symbol> unionOutputSymbols = anchorPlan.getFieldMappings().stream()
            .map(symbol -> symbolAllocator.newSymbol(symbol, "_expanded"))
            .collect(toImmutableList());
    ImmutableListMultimap.Builder<Symbol, Symbol> unionSymbolMapping = ImmutableListMultimap.builder();
    for (NodeAndMappings plan : recursionStepsToUnion) {
        for (int i = 0; i < unionOutputSymbols.size(); i++) {
            unionSymbolMapping.put(unionOutputSymbols.get(i), plan.getFields().get(i));
        }
    }
    List<PlanNode> nodesToUnion = recursionStepsToUnion.stream()
            .map(NodeAndMappings::getNode)
            .collect(toImmutableList());
    PlanNode result = new UnionNode(idAllocator.getNextId(), nodesToUnion, unionSymbolMapping.build(), unionOutputSymbols);
    if (union.isDistinct()) {
        result = new AggregationNode(
                idAllocator.getNextId(),
                result,
                ImmutableMap.of(),
                singleGroupingSet(result.getOutputSymbols()),
                ImmutableList.of(),
                AggregationNode.Step.SINGLE,
                Optional.empty(),
                Optional.empty());
    }
    return new RelationPlan(result, anchorPlan.getScope(), unionOutputSymbols, outerContext);
}
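The subtle part is the unrolling loop: each iteration adds the current chain to the union, then splices a fresh copy of the recursion step into the slot held by the previous copy (replace(recursionStep, replacementSpot, replacement)). Here is a hedged sketch of the same splicing pattern on plain strings rather than Trino plan nodes; the class and placeholder names are illustrative.

import java.util.ArrayList;
import java.util.List;

public class UnrollSketch
{
    public static void main(String[] args)
    {
        String anchor = "anchor";
        String stepTemplate = "step(<REF>)"; // <REF> plays the role of the recursive reference
        int maxDepth = 3;                    // stands in for getMaxRecursionDepth(session)

        List<String> union = new ArrayList<>();
        union.add(anchor);

        String previous = anchor;
        for (int i = 0; i < maxDepth; i++) {
            // splice the previous chain into the step, mirroring replace(...) above
            String unrolled = stepTemplate.replace("<REF>", previous);
            union.add(unrolled);
            previous = unrolled;
        }
        // the real planner adds one more step wrapped in a count()-window plus
        // a fail() filter: if that step still yields rows, recursion did not converge
        System.out.println(String.join(" UNION ALL ", union));
        // prints: anchor UNION ALL step(anchor) UNION ALL step(step(anchor)) UNION ALL step(step(step(anchor)))
    }
}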
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class TestPushPredicateIntoTableScan, method consumesDeterministicPredicateIfNewDomainIsSame.
@Test
public void consumesDeterministicPredicateIfNewDomainIsSame()
{
    ColumnHandle columnHandle = new TpchColumnHandle("nationkey", BIGINT);
    tester().assertThat(pushPredicateIntoTableScan)
            .on(p -> p.filter(
                    expression("nationkey = BIGINT '44'"),
                    p.tableScan(
                            nationTableHandle,
                            ImmutableList.of(p.symbol("nationkey", BIGINT)),
                            ImmutableMap.of(p.symbol("nationkey", BIGINT), columnHandle),
                            TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(BIGINT, (long) 44))))))
            .matches(constrainedTableScanWithTableLayout(
                    "nation",
                    ImmutableMap.of("nationkey", singleValue(BIGINT, (long) 44)),
                    ImmutableMap.of("nationkey", "nationkey")));
}
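Both the scan's enforced constraint and the expected layout in this test are built from the same BIGINT domain primitives. A minimal sketch of those constructions on their own; columnHandle is assumed from the test body, and the variable names are illustrative:

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.TupleDomain;

import static io.trino.spi.type.BigintType.BIGINT;

// a domain admitting exactly one BIGINT value, as used in the expected match
Domain only44 = Domain.singleValue(BIGINT, 44L);

// the equivalent fixed-value constraint placed on the table scan
TupleDomain<ColumnHandle> constraint = TupleDomain.fromFixedValues(
        ImmutableMap.of(columnHandle, NullableValue.of(BIGINT, 44L)));

// the rule can consume `nationkey = BIGINT '44'` because the predicate does not
// narrow this pre-existing constraint: the new domain equals the old one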