Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class DeltaLakeMetadata, the method getUpdateRowIdColumnHandle:
// The rowId is a RowType where the first field is a BIGINT containing the row number. The second
// field contains the values of any columns that were unmodified. They are needed to write the
// complete row after modifications in DeltaLakeUpdatablePageSource. If there are no unmodified
// columns, the rowId only has one field.
@Override
public ColumnHandle getUpdateRowIdColumnHandle(ConnectorSession session, ConnectorTableHandle tableHandle, List<ColumnHandle> updatedColumns) {
    DeltaLakeTableHandle handle = (DeltaLakeTableHandle) tableHandle;
    List<DeltaLakeColumnHandle> unmodifiedColumns = getUnmodifiedColumns(handle, updatedColumns);
    Type rowIdType;
    if (unmodifiedColumns.isEmpty()) {
        rowIdType = RowType.rowType(RowType.field(BIGINT));
    } else {
        List<RowType.Field> unmodifiedColumnFields = unmodifiedColumns.stream()
                .map(columnMetadata -> RowType.field(columnMetadata.getName(), columnMetadata.getType()))
                .collect(toImmutableList());
        rowIdType = RowType.rowType(RowType.field(BIGINT), RowType.field(RowType.from(unmodifiedColumnFields)));
    }
    return new DeltaLakeColumnHandle(ROW_ID_COLUMN_NAME, rowIdType, SYNTHESIZED);
}
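The comment above describes the two possible rowId shapes. The standalone sketch below (not part of the Trino sources; the class name and the "name" column are chosen purely for illustration) builds both shapes with the same SPI calls:

import io.trino.spi.type.RowType;
import io.trino.spi.type.Type;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.VARCHAR;

public class RowIdTypeSketch {
    public static void main(String[] args) {
        // No unmodified columns: the rowId is just the BIGINT row number.
        Type bare = RowType.rowType(RowType.field(BIGINT));

        // One unmodified column "name": the row number plus a nested row of unmodified values.
        Type withUnmodified = RowType.rowType(
                RowType.field(BIGINT),
                RowType.field(RowType.rowType(RowType.field("name", VARCHAR))));

        System.out.println(bare.getDisplayName());           // row(bigint)
        System.out.println(withUnmodified.getDisplayName()); // e.g. row(bigint, row(name varchar))
    }
}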
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class TestHivePartitionedBucketFunction, the method testMultiplePartitions:
@Test(dataProvider = "hiveBucketingVersion")
public void testMultiplePartitions(BucketingVersion hiveBucketingVersion) {
    int numValues = 1024;
    int numBuckets = 10;
    Block bucketColumn = createLongSequenceBlockWithNull(numValues);
    Page bucketedColumnPage = new Page(bucketColumn);
    BucketFunction hiveBucketFunction = bucketFunction(hiveBucketingVersion, numBuckets, ImmutableList.of(HIVE_LONG));

    int numPartitions = 8;
    List<Long> partitionValues = new ArrayList<>();
    for (int i = 0; i < numPartitions - 1; i++) {
        partitionValues.addAll(Collections.nCopies(numValues / numPartitions, i * 348349L));
    }
    partitionValues.addAll(Collections.nCopies(numValues / numPartitions, null));
    Block partitionColumn = createLongsBlock(partitionValues);
    Page page = new Page(bucketColumn, partitionColumn);

    Map<Long, HashMultimap<Integer, Integer>> partitionedBucketPositions = new HashMap<>();
    for (int i = 0; i < numValues; i++) {
        int hiveBucket = hiveBucketFunction.getBucket(bucketedColumnPage, i);
        Long hivePartition = partitionValues.get(i);
        // record the list of positions for each combination of hive partition and bucket
        partitionedBucketPositions.computeIfAbsent(hivePartition, ignored -> HashMultimap.create()).put(hiveBucket, i);
    }

    BucketFunction hivePartitionedBucketFunction = partitionedBucketFunction(hiveBucketingVersion, numBuckets, ImmutableList.of(HIVE_LONG), ImmutableList.of(BIGINT), 4000);

    // All positions of a given hive partition and bucket should hash to the same partitioned bucket
    for (Map.Entry<Long, HashMultimap<Integer, Integer>> partitionEntry : partitionedBucketPositions.entrySet()) {
        for (Map.Entry<Integer, Collection<Integer>> entry : partitionEntry.getValue().asMap().entrySet()) {
            assertBucketCount(hivePartitionedBucketFunction, page, entry.getValue(), 1);
        }
    }
    assertBucketCount(hivePartitionedBucketFunction, page, IntStream.range(0, numValues).boxed().collect(toImmutableList()), numBuckets * numPartitions);
}
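The assertBucketCount helper is not included in this excerpt. A plausible minimal implementation (the signature and body here are assumptions, not the actual test code) counts the distinct buckets that the given positions map to:

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import io.trino.spi.Page;
import io.trino.spi.connector.BucketFunction;

import static org.testng.Assert.assertEquals;

final class BucketAssertions {
    private BucketAssertions() {}

    // Collects the bucket assigned to each listed position and asserts how many
    // distinct buckets were seen.
    static void assertBucketCount(BucketFunction function, Page page, Collection<Integer> positions, int expectedBucketCount) {
        Set<Integer> buckets = new HashSet<>();
        for (int position : positions) {
            buckets.add(function.getBucket(page, position));
        }
        assertEquals(buckets.size(), expectedBucketCount);
    }
}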
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class BaseIcebergConnectorTest, the method testStatisticsConstraints:
@Test
public void testStatisticsConstraints() {
    String tableName = "iceberg.tpch.test_simple_partitioned_table_statistics";
    assertUpdate("CREATE TABLE iceberg.tpch.test_simple_partitioned_table_statistics (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col1'])");
    String insertStart = "INSERT INTO iceberg.tpch.test_simple_partitioned_table_statistics";
    assertUpdate(insertStart + " VALUES (1, 101), (2, 102), (3, 103), (4, 104)", 4);

    TableStatistics tableStatistics = getTableStatistics(tableName, new Constraint(TupleDomain.all()));
    IcebergColumnHandle col1Handle = getColumnHandleFromStatistics(tableStatistics, "col1");
    IcebergColumnHandle col2Handle = getColumnHandleFromStatistics(tableStatistics, "col2");

    // Constraint.predicate is currently not supported, because it's never provided by the engine.
    // TODO add (restore) test coverage when this changes.

    // predicate on a partition column
    assertThatThrownBy(() -> getTableStatistics(tableName, new Constraint(TupleDomain.all(), new TestRelationalNumberPredicate("col1", 3, i1 -> i1 >= 0), Set.of(col1Handle))))
            .isInstanceOf(VerifyException.class)
            .hasMessage("Unexpected Constraint predicate");

    // predicate on a non-partition column
    assertThatThrownBy(() -> getTableStatistics(tableName, new Constraint(TupleDomain.all(), new TestRelationalNumberPredicate("col2", 102, i -> i >= 0), Set.of(col2Handle))))
            .isInstanceOf(VerifyException.class)
            .hasMessage("Unexpected Constraint predicate");

    dropTable(tableName);
}
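TestRelationalNumberPredicate lives in the test sources and is not shown here. A hypothetical sketch of what such a Constraint predicate looks like (the IcebergColumnHandle cast and the Long representation of BIGINT values are assumptions): it compares one column's value against a reference number and applies the given test to the sign of the comparison.

import java.util.Map;
import java.util.function.Predicate;

import io.trino.plugin.iceberg.IcebergColumnHandle;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.predicate.NullableValue;

class TestRelationalNumberPredicate implements Predicate<Map<ColumnHandle, NullableValue>> {
    private final String columnName;
    private final long comparand;
    private final Predicate<Integer> comparisonTest;

    TestRelationalNumberPredicate(String columnName, long comparand, Predicate<Integer> comparisonTest) {
        this.columnName = columnName;
        this.comparand = comparand;
        this.comparisonTest = comparisonTest;
    }

    @Override
    public boolean test(Map<ColumnHandle, NullableValue> bindings) {
        for (Map.Entry<ColumnHandle, NullableValue> entry : bindings.entrySet()) {
            // assumes the handles are IcebergColumnHandle and BIGINT values arrive as Long
            if (columnName.equals(((IcebergColumnHandle) entry.getKey()).getName())) {
                Object value = entry.getValue().getValue();
                return value instanceof Long && comparisonTest.test(Long.compare((Long) value, comparand));
            }
        }
        return false;
    }
}

With this shape, new TestRelationalNumberPredicate("col1", 3, i1 -> i1 >= 0) matches rows where col1 >= 3.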
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class TestCreateTableTask, the method testCreateTableNotExistsTrue:
@Test
public void testCreateTableNotExistsTrue() {
    CreateTable statement = new CreateTable(
            QualifiedName.of("test_table"),
            ImmutableList.of(new ColumnDefinition(identifier("a"), toSqlType(BIGINT), true, emptyList(), Optional.empty())),
            true,
            ImmutableList.of(),
            Optional.empty());
    CreateTableTask createTableTask = new CreateTableTask(plannerContext, new AllowAllAccessControl(), columnPropertyManager, tablePropertyManager);
    getFutureValue(createTableTask.internalExecute(statement, testSession, emptyList(), output -> {}));
    assertEquals(metadata.getCreateTableCallCount(), 1);
}
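For orientation, the hand-built AST above corresponds to the SQL text "CREATE TABLE IF NOT EXISTS test_table (a bigint)", where the boolean true passed to the CreateTable constructor is the IF NOT EXISTS flag. A minimal parsing sketch (the ParsingOptions argument is an assumption; the SqlParser signature varies between Trino versions):

import io.trino.sql.parser.ParsingOptions;
import io.trino.sql.parser.SqlParser;
import io.trino.sql.tree.CreateTable;
import io.trino.sql.tree.Statement;

public class ParseCreateTableSketch {
    public static void main(String[] args) {
        // Parses to a CreateTable node equivalent to the one built by hand above.
        Statement statement = new SqlParser().createStatement(
                "CREATE TABLE IF NOT EXISTS test_table (a bigint)",
                new ParsingOptions());
        System.out.println(statement instanceof CreateTable); // true
    }
}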
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class TestValuesNodeStats, the method testStatsForValuesNode:
@Test
public void testStatsForValuesNode() {
    tester().assertStatsFor(pb -> pb.values(
                    ImmutableList.of(pb.symbol("a", BIGINT), pb.symbol("b", DOUBLE)),
                    ImmutableList.of(
                            ImmutableList.of(expression("3+3"), expression("13.5e0")),
                            ImmutableList.of(expression("55"), expression("null")),
                            ImmutableList.of(expression("6"), expression("13.5e0")))))
            .check(outputStats -> outputStats.equalTo(PlanNodeStatsEstimate.builder()
                    .setOutputRowCount(3)
                    .addSymbolStatistics(new Symbol("a"), SymbolStatsEstimate.builder()
                            .setNullsFraction(0)
                            .setLowValue(6)
                            .setHighValue(55)
                            .setDistinctValuesCount(2)
                            .build())
                    .addSymbolStatistics(new Symbol("b"), SymbolStatsEstimate.builder()
                            .setNullsFraction(0.33333333333333333)
                            .setLowValue(13.5)
                            .setHighValue(13.5)
                            .setDistinctValuesCount(1)
                            .build())
                    .build()));

    tester().assertStatsFor(pb -> pb.values(
                    ImmutableList.of(pb.symbol("v", createVarcharType(30))),
                    ImmutableList.of(
                            ImmutableList.of(expression("'Alice'")),
                            ImmutableList.of(expression("'has'")),
                            ImmutableList.of(expression("'a cat'")),
                            ImmutableList.of(expression("null")))))
            .check(outputStats -> outputStats.equalTo(PlanNodeStatsEstimate.builder()
                    .setOutputRowCount(4)
                    .addSymbolStatistics(new Symbol("v"), SymbolStatsEstimate.builder()
                            .setNullsFraction(0.25)
                            .setDistinctValuesCount(3)
                            .build())
                    .build()));
}
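The expected estimates follow directly from the literal rows: for column b, one null out of three rows gives a nulls fraction of 1/3, and the single non-null value 13.5 is both the low and the high value with one distinct value. A plain-Java sketch of that arithmetic (no Trino dependencies; class name chosen for illustration):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;

public class ValuesStatsArithmetic {
    public static void main(String[] args) {
        // column b of the first VALUES node: 13.5, null, 13.5
        List<Double> b = Arrays.asList(13.5, null, 13.5);
        long nulls = b.stream().filter(Objects::isNull).count();
        double nullsFraction = (double) nulls / b.size(); // 1/3 = 0.333...
        Set<Double> distinct = new HashSet<>(b);
        distinct.remove(null); // the distinct-values count excludes nulls
        System.out.printf("nullsFraction=%s distinct=%s%n", nullsFraction, distinct.size()); // 0.333..., 1
    }
}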