use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class FormatFunction, method valueConverter:
private static BiFunction<ConnectorSession, Block, Object> valueConverter(FunctionDependencies functionDependencies, Type type, int position)
{
    if (type.equals(UNKNOWN)) {
        return (session, block) -> null;
    }
    if (type.equals(BOOLEAN)) {
        return (session, block) -> type.getBoolean(block, position);
    }
    if (type.equals(TINYINT) || type.equals(SMALLINT) || type.equals(INTEGER) || type.equals(BIGINT)) {
        return (session, block) -> type.getLong(block, position);
    }
    if (type.equals(REAL)) {
        return (session, block) -> intBitsToFloat(toIntExact(type.getLong(block, position)));
    }
    if (type.equals(DOUBLE)) {
        return (session, block) -> type.getDouble(block, position);
    }
    if (type.equals(DATE)) {
        return (session, block) -> LocalDate.ofEpochDay(type.getLong(block, position));
    }
    if (type instanceof TimestampWithTimeZoneType) {
        return (session, block) -> toZonedDateTime((TimestampWithTimeZoneType) type, block, position);
    }
    if (type instanceof TimestampType) {
        return (session, block) -> toLocalDateTime((TimestampType) type, block, position);
    }
    if (type instanceof TimeType) {
        return (session, block) -> toLocalTime(type.getLong(block, position));
    }
    // TODO: support TIME WITH TIME ZONE by https://github.com/trinodb/trino/issues/191 + mapping to java.time.OffsetTime
    if (type.equals(JSON)) {
        MethodHandle handle = functionDependencies.getFunctionInvoker(QualifiedName.of("json_format"), ImmutableList.of(JSON), simpleConvention(FAIL_ON_NULL, NEVER_NULL)).getMethodHandle();
        return (session, block) -> convertToString(handle, type.getSlice(block, position));
    }
    if (isShortDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> BigDecimal.valueOf(type.getLong(block, position), scale);
    }
    if (isLongDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> new BigDecimal(((Int128) type.getObject(block, position)).toBigInteger(), scale);
    }
    if (type instanceof VarcharType) {
        return (session, block) -> type.getSlice(block, position).toStringUtf8();
    }
    if (type instanceof CharType) {
        CharType charType = (CharType) type;
        return (session, block) -> padSpaces(type.getSlice(block, position), charType).toStringUtf8();
    }
    BiFunction<ConnectorSession, Block, Object> function;
    if (type.getJavaType() == long.class) {
        function = (session, block) -> type.getLong(block, position);
    }
    else if (type.getJavaType() == double.class) {
        function = (session, block) -> type.getDouble(block, position);
    }
    else if (type.getJavaType() == boolean.class) {
        function = (session, block) -> type.getBoolean(block, position);
    }
    else if (type.getJavaType() == Slice.class) {
        function = (session, block) -> type.getSlice(block, position);
    }
    else {
        function = (session, block) -> type.getObject(block, position);
    }
    MethodHandle handle = functionDependencies.getCastInvoker(type, VARCHAR, simpleConvention(FAIL_ON_NULL, NEVER_NULL)).getMethodHandle();
    return (session, block) -> convertToString(handle, function.apply(session, block));
}
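Every branch above ultimately reads a single position from a Block through the Type interface; for TINYINT, SMALLINT, INTEGER, and BIGINT that read is Type.getLong. A minimal, self-contained sketch of that round trip (assuming trino-spi is on the classpath; the class name is made up for illustration):

import io.trino.spi.block.Block;
import io.trino.spi.block.BlockBuilder;

import static io.trino.spi.type.BigintType.BIGINT;

public class BigintBlockSketch
{
    public static void main(String[] args)
    {
        // Write one BIGINT value into a single-position block
        BlockBuilder builder = BIGINT.createBlockBuilder(null, 1);
        BIGINT.writeLong(builder, 42L);
        Block block = builder.build();

        // The TINYINT/SMALLINT/INTEGER/BIGINT branch of valueConverter reduces to this read
        long value = BIGINT.getLong(block, 0);
        System.out.println(value); // prints 42
    }
}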
use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class LogicalPlanner, method createTableWriterPlan:
private RelationPlan createTableWriterPlan(Analysis analysis, PlanNode source, List<Symbol> symbols, WriterTarget target, List<String> columnNames, List<ColumnMetadata> columnMetadataList, Optional<TableLayout> writeTableLayout, TableStatisticsMetadata statisticsMetadata)
{
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    Optional<PartitioningScheme> preferredPartitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        Optional<PartitioningHandle> partitioningHandle = writeTableLayout.get().getPartitioning();
        if (partitioningHandle.isPresent()) {
            partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle.get(), partitionFunctionArguments), outputLayout));
        }
        else {
            // empty connector partitioning handle means evenly partitioning on partitioning columns
            preferredPartitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments), outputLayout));
        }
    }
    verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
    Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));
    Set<Symbol> notNullColumnSymbols = columnMetadataList.stream()
            .filter(column -> !column.isNullable())
            .map(ColumnMetadata::getName)
            .map(columnToSymbolMap::get)
            .collect(toImmutableSet());
    if (!statisticsMetadata.isEmpty()) {
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(symbolAllocator, plannerContext);
        // partial aggregation is run within the TableWriterOperator to calculate the statistics for
        // the data consumed by the TableWriterOperator;
        // final aggregation is run within the TableFinishOperator to summarize the statistics
        // collected by the partial aggregations from all of the writer nodes
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(),
                new TableWriterNode(
                        idAllocator.getNextId(), source, target,
                        symbolAllocator.newSymbol("partialrows", BIGINT),
                        symbolAllocator.newSymbol("fragment", VARBINARY),
                        symbols, columnNames, notNullColumnSymbols,
                        partitioningScheme, preferredPartitioningScheme,
                        Optional.of(partialAggregation),
                        Optional.of(result.getDescriptor().map(aggregations.getMappings()::get))),
                target,
                symbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
    }
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(
                    idAllocator.getNextId(), source, target,
                    symbolAllocator.newSymbol("partialrows", BIGINT),
                    symbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols, columnNames, notNullColumnSymbols,
                    partitioningScheme, preferredPartitioningScheme,
                    Optional.empty(), Optional.empty()),
            target,
            symbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
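The columnNames-to-symbols pairing above uses Guava's Streams.zip over two parallel lists, guarded by the verify that both lists have the same size. A small standalone sketch of the same pattern (plain Strings stand in for Trino's Symbol; the class and column names are made up):

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.Streams.zip;

public class ZipColumnsSketch
{
    public static void main(String[] args)
    {
        List<String> columnNames = List.of("orderkey", "totalprice");
        List<String> symbols = List.of("orderkey_0", "totalprice_1");
        // Pair the two parallel lists into a map, as createTableWriterPlan does for columnToSymbolMap
        Map<String, String> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));
        System.out.println(columnToSymbolMap); // {orderkey=orderkey_0, totalprice=totalprice_1}
    }
}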
use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class QueryPlanner, method planGroupingSets:
private GroupingSetsPlan planGroupingSets(PlanBuilder subPlan, QuerySpecification node, GroupingSetAnalysis groupingSetAnalysis)
{
    Map<Symbol, Symbol> groupingSetMappings = new LinkedHashMap<>();
    // Compute a set of artificial columns that will contain the values of the original columns
    // filtered by whether the column is included in the grouping set.
    // This will become the basis for the scope for any column references.
    Symbol[] fields = new Symbol[subPlan.getTranslations().getFieldSymbols().size()];
    for (FieldId field : groupingSetAnalysis.getAllFields()) {
        Symbol input = subPlan.getTranslations().getFieldSymbols().get(field.getFieldIndex());
        Symbol output = symbolAllocator.newSymbol(input, "gid");
        fields[field.getFieldIndex()] = output;
        groupingSetMappings.put(output, input);
    }
    Map<ScopeAware<Expression>, Symbol> complexExpressions = new HashMap<>();
    for (Expression expression : groupingSetAnalysis.getComplexExpressions()) {
        if (!complexExpressions.containsKey(scopeAwareKey(expression, analysis, subPlan.getScope()))) {
            Symbol input = subPlan.translate(expression);
            Symbol output = symbolAllocator.newSymbol(expression, analysis.getType(expression), "gid");
            complexExpressions.put(scopeAwareKey(expression, analysis, subPlan.getScope()), output);
            groupingSetMappings.put(output, input);
        }
    }
    // For the purpose of "distinct", we need to canonicalize column references that may have varying
    // syntactic forms (e.g., "t.a" vs "a"). Thus we need to enumerate grouping sets based on the underlying
    // fieldId associated with each column reference expression.
    // The catch is that simple group-by expressions can be arbitrary expressions (this is a departure from the SQL specification).
    // But they don't affect the number of grouping sets or the behavior of "distinct". We can compute all the candidate
    // grouping sets in terms of fieldId, dedup as appropriate, and then cross-join them with the complex expressions.
    // This tracks the grouping sets before complex expressions are considered.
    // It's also used to compute the descriptors needed to implement grouping().
    List<Set<FieldId>> columnOnlyGroupingSets = enumerateGroupingSets(groupingSetAnalysis);
    if (node.getGroupBy().isPresent() && node.getGroupBy().get().isDistinct()) {
        columnOnlyGroupingSets = columnOnlyGroupingSets.stream()
                .distinct()
                .collect(toImmutableList());
    }
    // translate from FieldIds to Symbols
    List<List<Symbol>> sets = columnOnlyGroupingSets.stream()
            .map(set -> set.stream()
                    .map(FieldId::getFieldIndex)
                    .map(index -> fields[index])
                    .collect(toImmutableList()))
            .collect(toImmutableList());
    // combine (cartesian product) with complex expressions
    List<List<Symbol>> groupingSets = sets.stream()
            .map(set -> ImmutableList.<Symbol>builder()
                    .addAll(set)
                    .addAll(complexExpressions.values())
                    .build())
            .collect(toImmutableList());
    // Generate GroupIdNode (multiple grouping sets) or ProjectNode (single grouping set)
    PlanNode groupId;
    Optional<Symbol> groupIdSymbol = Optional.empty();
    if (groupingSets.size() > 1) {
        groupIdSymbol = Optional.of(symbolAllocator.newSymbol("groupId", BIGINT));
        groupId = new GroupIdNode(idAllocator.getNextId(), subPlan.getRoot(), groupingSets, groupingSetMappings, subPlan.getRoot().getOutputSymbols(), groupIdSymbol.get());
    }
    else {
        Assignments.Builder assignments = Assignments.builder();
        assignments.putIdentities(subPlan.getRoot().getOutputSymbols());
        groupingSetMappings.forEach((key, value) -> assignments.put(key, value.toSymbolReference()));
        groupId = new ProjectNode(idAllocator.getNextId(), subPlan.getRoot(), assignments.build());
    }
    subPlan = new PlanBuilder(subPlan.getTranslations().withNewMappings(complexExpressions, Arrays.asList(fields)), groupId);
    return new GroupingSetsPlan(subPlan, columnOnlyGroupingSets, groupingSets, groupIdSymbol);
}
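The "combine (cartesian product) with complex expressions" step simply appends every complex-expression symbol to each column-only grouping set. A hedged sketch of that combination with plain strings standing in for Symbols (class and symbol names are made up):

import java.util.List;

import com.google.common.collect.ImmutableList;

import static com.google.common.collect.ImmutableList.toImmutableList;

public class GroupingSetsSketch
{
    public static void main(String[] args)
    {
        // Column-only grouping sets, e.g. from GROUP BY GROUPING SETS ((a), (a, b))
        List<List<String>> sets = List.of(List.of("a"), List.of("a", "b"));
        // Complex grouping expressions get appended to every set
        List<String> complexExpressions = List.of("expr$gid");
        List<List<String>> groupingSets = sets.stream()
                .map(set -> ImmutableList.<String>builder().addAll(set).addAll(complexExpressions).build())
                .collect(toImmutableList());
        System.out.println(groupingSets); // [[a, expr$gid], [a, b, expr$gid]]
    }
}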
use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class QueryPlanner, method plan (for UPDATE statements):
public UpdateNode plan(Update node)
{
    Table table = node.getTable();
    TableHandle handle = analysis.getTableHandle(table);
    TableSchema tableSchema = plannerContext.getMetadata().getTableSchema(session, handle);
    Map<String, ColumnHandle> columnMap = plannerContext.getMetadata().getColumnHandles(session, handle);
    List<ColumnSchema> columnsSchemas = tableSchema.getColumns();
    List<String> targetColumnNames = node.getAssignments().stream()
            .map(assignment -> assignment.getName().getValue())
            .collect(toImmutableList());
    // Create lists of column names and SET expressions, in table column order
    ImmutableList.Builder<String> updatedColumnNamesBuilder = ImmutableList.builder();
    ImmutableList.Builder<ColumnHandle> updatedColumnHandlesBuilder = ImmutableList.builder();
    ImmutableList.Builder<Expression> orderedColumnValuesBuilder = ImmutableList.builder();
    for (ColumnSchema columnSchema : columnsSchemas) {
        String name = columnSchema.getName();
        int index = targetColumnNames.indexOf(name);
        if (index >= 0) {
            updatedColumnNamesBuilder.add(name);
            updatedColumnHandlesBuilder.add(requireNonNull(columnMap.get(name), "columnMap didn't contain name"));
            orderedColumnValuesBuilder.add(node.getAssignments().get(index).getValue());
        }
    }
    List<String> updatedColumnNames = updatedColumnNamesBuilder.build();
    List<ColumnHandle> updatedColumnHandles = updatedColumnHandlesBuilder.build();
    List<Expression> orderedColumnValues = orderedColumnValuesBuilder.build();
    // create table scan
    RelationPlan relationPlan = new RelationPlanner(analysis, symbolAllocator, idAllocator, lambdaDeclarationToSymbolMap, plannerContext, outerContext, session, recursiveSubqueries)
            .process(table, null);
    PlanBuilder builder = newPlanBuilder(relationPlan, analysis, lambdaDeclarationToSymbolMap);
    if (node.getWhere().isPresent()) {
        builder = filter(builder, node.getWhere().get(), node);
    }
    builder = subqueryPlanner.handleSubqueries(builder, orderedColumnValues, analysis.getSubqueries(node));
    builder = builder.appendProjections(orderedColumnValues, symbolAllocator, idAllocator);
    PlanAndMappings planAndMappings = coerce(builder, orderedColumnValues, analysis, idAllocator, symbolAllocator, typeCoercion);
    builder = planAndMappings.getSubPlan();
    ImmutableList.Builder<Symbol> updatedColumnValuesBuilder = ImmutableList.builder();
    orderedColumnValues.forEach(columnValue -> updatedColumnValuesBuilder.add(planAndMappings.get(columnValue)));
    Symbol rowId = builder.translate(analysis.getRowIdField(table));
    updatedColumnValuesBuilder.add(rowId);
    List<Symbol> outputs = ImmutableList.of(
            symbolAllocator.newSymbol("partialrows", BIGINT),
            symbolAllocator.newSymbol("fragment", VARBINARY));
    Optional<PlanNodeId> tableScanId = getIdForLeftTableScan(relationPlan.getRoot());
    checkArgument(tableScanId.isPresent(), "tableScanId not present");
    // create update node
    return new UpdateNode(
            idAllocator.getNextId(),
            builder.getRoot(),
            new UpdateTarget(
                    Optional.empty(),
                    plannerContext.getMetadata().getTableMetadata(session, handle).getTable(),
                    updatedColumnNames,
                    updatedColumnHandles),
            rowId,
            updatedColumnValuesBuilder.build(),
            outputs);
}
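The loop over columnsSchemas is what reorders the SET assignments into table column order: indexOf locates each table column among the assignment targets, and matches are emitted in declaration order rather than the order they were written in the UPDATE. A standalone sketch of just that reordering (table and column names are illustrative):

import java.util.List;

import com.google.common.collect.ImmutableList;

public class UpdateOrderingSketch
{
    public static void main(String[] args)
    {
        // Table columns in declaration order
        List<String> tableColumns = List.of("id", "name", "price");
        // Target columns as written: UPDATE t SET price = ..., name = ...
        List<String> targetColumnNames = List.of("price", "name");

        ImmutableList.Builder<String> orderedNames = ImmutableList.builder();
        for (String column : tableColumns) {
            int index = targetColumnNames.indexOf(column);
            if (index >= 0) {
                // index would also pick out the matching SET expression
                orderedNames.add(column);
            }
        }
        System.out.println(orderedNames.build()); // [name, price]: table order, not SET order
    }
}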
use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
The class TestHiveProjectionPushdownIntoTableScan, method testDereferencePushdown:
@Test
public void testDereferencePushdown()
{
    String testTable = "test_simple_projection_pushdown";
    QualifiedObjectName completeTableName = new QualifiedObjectName(HIVE_CATALOG_NAME, SCHEMA_NAME, testTable);
    getQueryRunner().execute(format("CREATE TABLE %s (col0, col1) AS SELECT cast(row(5, 6) as row(x bigint, y bigint)) AS col0, 5 AS col1 WHERE false", testTable));
    Session session = getQueryRunner().getDefaultSession();
    Optional<TableHandle> tableHandle = getTableHandle(session, completeTableName);
    assertTrue(tableHandle.isPresent(), "expected the table handle to be present");
    Map<String, ColumnHandle> columns = getColumnHandles(session, completeTableName);
    HiveColumnHandle column0Handle = (HiveColumnHandle) columns.get("col0");
    HiveColumnHandle column1Handle = (HiveColumnHandle) columns.get("col1");
    HiveColumnHandle columnX = createProjectedColumnHandle(column0Handle, ImmutableList.of(0));
    HiveColumnHandle columnY = createProjectedColumnHandle(column0Handle, ImmutableList.of(1));
    // Simple projection pushdown
    assertPlan(
            "SELECT col0.x expr_x, col0.y expr_y FROM " + testTable,
            any(tableScan(
                    ((HiveTableHandle) tableHandle.get().getConnectorHandle()).withProjectedColumns(ImmutableSet.of(columnX, columnY))::equals,
                    TupleDomain.all(),
                    ImmutableMap.of("col0#x", columnX::equals, "col0#y", columnY::equals))));
    // Projection and predicate pushdown
    assertPlan(
            format("SELECT col0.x FROM %s WHERE col0.x = col1 + 3 and col0.y = 2", testTable),
            anyTree(filter("col0_y = BIGINT '2' AND (col0_x = cast((col1 + 3) as BIGINT))", tableScan(table -> {
                HiveTableHandle hiveTableHandle = (HiveTableHandle) table;
                return hiveTableHandle.getCompactEffectivePredicate().equals(TupleDomain.withColumnDomains(ImmutableMap.of(columnY, Domain.singleValue(BIGINT, 2L))))
                        && hiveTableHandle.getProjectedColumns().equals(ImmutableSet.of(column1Handle, columnX, columnY));
            }, TupleDomain.all(), ImmutableMap.of("col0_y", columnY::equals, "col0_x", columnX::equals, "col1", column1Handle::equals)))));
    // Projection and predicate pushdown with overlapping columns
    assertPlan(
            format("SELECT col0, col0.y expr_y FROM %s WHERE col0.x = 5", testTable),
            anyTree(filter("col0_x = BIGINT '5'", tableScan(table -> {
                HiveTableHandle hiveTableHandle = (HiveTableHandle) table;
                return hiveTableHandle.getCompactEffectivePredicate().equals(TupleDomain.withColumnDomains(ImmutableMap.of(columnX, Domain.singleValue(BIGINT, 5L))))
                        && hiveTableHandle.getProjectedColumns().equals(ImmutableSet.of(column0Handle, columnX));
            }, TupleDomain.all(), ImmutableMap.of("col0", column0Handle::equals, "col0_x", columnX::equals)))));
    // Projection and predicate pushdown with joins
    assertPlan(
            format("SELECT T.col0.x, T.col0, T.col0.y FROM %s T join %s S on T.col1 = S.col1 WHERE (T.col0.x = 2)", testTable, testTable),
            anyTree(project(
                    ImmutableMap.of("expr_0_x", expression("expr_0[1]"), "expr_0", expression("expr_0"), "expr_0_y", expression("expr_0[2]")),
                    join(INNER, ImmutableList.of(equiJoinClause("t_expr_1", "s_expr_1")),
                            anyTree(filter("expr_0_x = BIGINT '2'", tableScan(
                                    table -> ((HiveTableHandle) table).getCompactEffectivePredicate().getDomains().get().equals(ImmutableMap.of(columnX, Domain.singleValue(BIGINT, 2L))),
                                    TupleDomain.all(),
                                    ImmutableMap.of("expr_0_x", columnX::equals, "expr_0", column0Handle::equals, "t_expr_1", column1Handle::equals)))),
                            anyTree(tableScan(
                                    ((HiveTableHandle) tableHandle.get().getConnectorHandle()).withProjectedColumns(ImmutableSet.of(column1Handle))::equals,
                                    TupleDomain.all(),
                                    ImmutableMap.of("s_expr_1", column1Handle::equals)))))));
}
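The predicate-pushdown assertions compare the table handle's compact effective predicate against TupleDomain/Domain values built with BIGINT, the same calls the test itself uses. A minimal sketch of constructing such a predicate (assuming trino-spi on the classpath; the String key and class name stand in for a real ColumnHandle and are made up):

import java.util.Map;

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;

import static io.trino.spi.type.BigintType.BIGINT;

public class PushdownPredicateSketch
{
    public static void main(String[] args)
    {
        // The single-value BIGINT domain the test expects for col0.y = 2
        Domain domain = Domain.singleValue(BIGINT, 2L);
        TupleDomain<String> predicate = TupleDomain.withColumnDomains(Map.of("col0.y", domain));
        System.out.println(domain.isSingleValue() + " " + domain.getSingleValue()); // true 2
        System.out.println(predicate.isAll()); // false: the domain constrains col0.y
    }
}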