Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class BaseIcebergConnectorTest, method testPartitionedTableStatistics.
@Test
public void testPartitionedTableStatistics() {
    assertUpdate("CREATE TABLE iceberg.tpch.test_partitioned_table_statistics (col1 REAL, col2 BIGINT) WITH (partitioning = ARRAY['col2'])");
    String insertStart = "INSERT INTO test_partitioned_table_statistics";
    assertUpdate(insertStart + " VALUES (-10, -1)", 1);
    assertUpdate(insertStart + " VALUES (100, 10)", 1);

    // Two rows, no nulls: one stats row per column plus the summary row
    MaterializedResult result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
    assertEquals(result.getRowCount(), 3);
    MaterializedRow row0 = result.getMaterializedRows().get(0);
    assertEquals(row0.getField(0), "col1");
    assertEquals(row0.getField(3), 0.0); // nulls fraction
    assertEquals(row0.getField(5), "-10.0"); // low value
    assertEquals(row0.getField(6), "100.0"); // high value
    MaterializedRow row1 = result.getMaterializedRows().get(1);
    assertEquals(row1.getField(0), "col2");
    assertEquals(row1.getField(3), 0.0);
    assertEquals(row1.getField(5), "-1");
    assertEquals(row1.getField(6), "10");
    MaterializedRow row2 = result.getMaterializedRows().get(2);
    assertEquals(row2.getField(4), 2.0); // row count

    // Add five non-null and five null col1 values (12 rows total)
    assertUpdate(insertStart + " VALUES " + IntStream.rangeClosed(1, 5).mapToObj(i -> format("(%d, 10)", i + 100)).collect(joining(", ")), 5);
    assertUpdate(insertStart + " VALUES " + IntStream.rangeClosed(6, 10).mapToObj(i -> "(NULL, 10)").collect(joining(", ")), 5);
    result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
    assertEquals(result.getRowCount(), 3);
    row0 = result.getMaterializedRows().get(0);
    assertEquals(row0.getField(0), "col1");
    assertEquals(row0.getField(3), 5.0 / 12.0);
    assertEquals(row0.getField(5), "-10.0");
    assertEquals(row0.getField(6), "105.0");
    row1 = result.getMaterializedRows().get(1);
    assertEquals(row1.getField(0), "col2");
    assertEquals(row1.getField(3), 0.0);
    assertEquals(row1.getField(5), "-1");
    assertEquals(row1.getField(6), "10");
    row2 = result.getMaterializedRows().get(2);
    assertEquals(row2.getField(4), 12.0);

    // Add five rows with a null partition value for col2 (17 rows total)
    assertUpdate(insertStart + " VALUES " + IntStream.rangeClosed(6, 10).mapToObj(i -> "(100, NULL)").collect(joining(", ")), 5);
    result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
    row0 = result.getMaterializedRows().get(0);
    assertEquals(row0.getField(0), "col1");
    assertEquals(row0.getField(3), 5.0 / 17.0);
    assertEquals(row0.getField(5), "-10.0");
    assertEquals(row0.getField(6), "105.0");
    row1 = result.getMaterializedRows().get(1);
    assertEquals(row1.getField(0), "col2");
    assertEquals(row1.getField(3), 5.0 / 17.0);
    assertEquals(row1.getField(5), "-1");
    assertEquals(row1.getField(6), "10");
    row2 = result.getMaterializedRows().get(2);
    assertEquals(row2.getField(4), 17.0);
    dropTable("iceberg.tpch.test_partitioned_table_statistics");
}
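The getField indices in this test follow the column order of Trino's SHOW STATS output: column_name, data_size, distinct_values_count, nulls_fraction, row_count, low_value, high_value. A minimal sketch of a helper that reads one stats row under that assumption (the helper name is hypothetical, not part of the test):

import io.trino.testing.MaterializedRow;

// Hypothetical helper: decode a SHOW STATS row by position.
// Indices: 0=column_name, 3=nulls_fraction, 4=row_count, 5=low_value, 6=high_value
static void printStatsRow(MaterializedRow row) {
    System.out.printf("column=%s nullsFraction=%s rowCount=%s low=%s high=%s%n",
            row.getField(0), row.getField(3), row.getField(4), row.getField(5), row.getField(6));
}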
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class BaseIcebergConnectorTest, method testPredicatePushdown.
@Test
public void testPredicatePushdown() {
    QualifiedObjectName tableName = new QualifiedObjectName("iceberg", "tpch", "test_predicate");
    assertUpdate(format("CREATE TABLE %s (col1 BIGINT, col2 BIGINT, col3 BIGINT) WITH (partitioning = ARRAY['col2', 'col3'])", tableName));
    assertUpdate(format("INSERT INTO %s VALUES (1, 10, 100)", tableName), 1L);
    assertUpdate(format("INSERT INTO %s VALUES (2, 20, 200)", tableName), 1L);

    // Predicate on a non-partition column remains unenforced
    assertQuery(format("SELECT * FROM %s WHERE col1 = 1", tableName), "VALUES (1, 10, 100)");
    assertFilterPushdown(tableName, ImmutableMap.of("col1", singleValue(BIGINT, 1L)), ImmutableMap.of(), ImmutableMap.of("col1", singleValue(BIGINT, 1L)));

    // Predicate on a partition column is enforced by the connector
    assertQuery(format("SELECT * FROM %s WHERE col2 = 10", tableName), "VALUES (1, 10, 100)");
    assertFilterPushdown(tableName, ImmutableMap.of("col2", singleValue(BIGINT, 10L)), ImmutableMap.of("col2", singleValue(BIGINT, 10L)), ImmutableMap.of());

    assertQuery(format("SELECT * FROM %s WHERE col1 = 1 AND col2 = 10", tableName), "VALUES (1, 10, 100)");
    assertFilterPushdown(
            tableName,
            ImmutableMap.of("col1", singleValue(BIGINT, 1L), "col2", singleValue(BIGINT, 10L)),
            ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
            ImmutableMap.of("col1", singleValue(BIGINT, 1L)));

    // Assert pushdown for an IN predicate with value count above the default compaction threshold
    List<Long> values = LongStream.range(1L, 1010L).boxed().filter(index -> index != 20L).collect(toImmutableList());
    assertTrue(values.size() > ICEBERG_DOMAIN_COMPACTION_THRESHOLD);
    String valuesString = join(",", values.stream().map(Object::toString).collect(toImmutableList()));
    String inPredicate = "%s IN (" + valuesString + ")";
    assertQuery(format("SELECT * FROM %s WHERE %s AND %s", tableName, format(inPredicate, "col1"), format(inPredicate, "col2")), "VALUES (1, 10, 100)");
    assertFilterPushdown(
            tableName,
            ImmutableMap.of("col1", multipleValues(BIGINT, values), "col2", multipleValues(BIGINT, values)),
            ImmutableMap.of("col2", multipleValues(BIGINT, values)),
            // Unenforced predicate is simplified during split generation, but not reflected here
            ImmutableMap.of("col1", multipleValues(BIGINT, values)));
    dropTable(tableName.getObjectName());
}
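The singleValue and multipleValues calls above are the static factories on io.trino.spi.predicate.Domain, applied to the BIGINT type from io.trino.spi.type.BigintType. A minimal sketch of building the same domains outside of assertFilterPushdown (variable names are illustrative):

import io.trino.spi.predicate.Domain;
import java.util.List;
import static io.trino.spi.type.BigintType.BIGINT;

// Sketch, assuming trino-spi on the classpath
static void domainExamples() {
    // Domain matching exactly one BIGINT value, as used for col1 = 1
    Domain single = Domain.singleValue(BIGINT, 1L);
    // Domain matching a discrete set of values, as used for the large IN list
    Domain multiple = Domain.multipleValues(BIGINT, List.of(1L, 2L, 3L));
}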
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class PhoenixClient, method beginCreateTable.
@Override
public JdbcOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schema = schemaTableName.getSchemaName();
    String table = schemaTableName.getTableName();
    if (!getSchemaNames(session).contains(schema)) {
        throw new SchemaNotFoundException(schema);
    }
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        schema = getIdentifierMapping().toRemoteSchemaName(identity, connection, schema);
        table = getIdentifierMapping().toRemoteTableName(identity, connection, schema, table);
        schema = toPhoenixSchemaName(schema);
        LinkedList<ColumnMetadata> tableColumns = new LinkedList<>(tableMetadata.getColumns());
        Map<String, Object> tableProperties = tableMetadata.getProperties();
        Optional<Boolean> immutableRows = PhoenixTableProperties.getImmutableRows(tableProperties);
        String immutable = immutableRows.isPresent() && immutableRows.get() ? "IMMUTABLE" : "";
        ImmutableList.Builder<String> columnNames = ImmutableList.builder();
        ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
        ImmutableList.Builder<String> columnList = ImmutableList.builder();
        Set<ColumnMetadata> rowkeyColumns = tableColumns.stream().filter(col -> isPrimaryKey(col, tableProperties)).collect(toSet());
        ImmutableList.Builder<String> pkNames = ImmutableList.builder();
        Optional<String> rowkeyColumn = Optional.empty();
        if (rowkeyColumns.isEmpty()) {
            // Add a rowkey when not specified in DDL
            columnList.add(ROWKEY + " bigint not null");
            pkNames.add(ROWKEY);
            execute(session, format("CREATE SEQUENCE %s", getEscapedTableName(schema, table + "_sequence")));
            rowkeyColumn = Optional.of(ROWKEY);
        }
        for (ColumnMetadata column : tableColumns) {
            String columnName = getIdentifierMapping().toRemoteColumnName(connection, column.getName());
            columnNames.add(columnName);
            columnTypes.add(column.getType());
            String typeStatement = toWriteMapping(session, column.getType()).getDataType();
            if (rowkeyColumns.contains(column)) {
                typeStatement += " not null";
                pkNames.add(columnName);
            }
            columnList.add(format("%s %s", getEscapedArgument(columnName), typeStatement));
        }
        // Translate Trino table properties into Phoenix/HBase table options
        ImmutableList.Builder<String> tableOptions = ImmutableList.builder();
        PhoenixTableProperties.getSaltBuckets(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.SALT_BUCKETS + "=" + value));
        PhoenixTableProperties.getSplitOn(tableProperties).ifPresent(value -> tableOptions.add("SPLIT ON (" + value.replace('"', '\'') + ")"));
        PhoenixTableProperties.getDisableWal(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DISABLE_WAL + "=" + value));
        PhoenixTableProperties.getDefaultColumnFamily(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DEFAULT_COLUMN_FAMILY + "=" + value));
        PhoenixTableProperties.getBloomfilter(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.BLOOMFILTER + "='" + value + "'"));
        PhoenixTableProperties.getVersions(tableProperties).ifPresent(value -> tableOptions.add(HConstants.VERSIONS + "=" + value));
        PhoenixTableProperties.getMinVersions(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.MIN_VERSIONS + "=" + value));
        PhoenixTableProperties.getCompression(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.COMPRESSION + "='" + value + "'"));
        PhoenixTableProperties.getTimeToLive(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.TTL + "=" + value));
        PhoenixTableProperties.getDataBlockEncoding(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + value + "'"));
        String sql = format(
                "CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s",
                immutable,
                getEscapedTableName(schema, table),
                join(", ", columnList.build()),
                join(", ", pkNames.build()),
                join(", ", tableOptions.build()));
        execute(session, sql);
        return new PhoenixOutputTableHandle(schema, table, columnNames.build(), columnTypes.build(), Optional.empty(), rowkeyColumn);
    }
    catch (SQLException e) {
        if (e.getErrorCode() == SQLExceptionCode.TABLE_ALREADY_EXIST.getErrorCode()) {
            throw new TrinoException(ALREADY_EXISTS, "Phoenix table already exists", e);
        }
        throw new TrinoException(PHOENIX_METADATA_ERROR, "Error creating Phoenix table", e);
    }
}
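With no rowkey property set, the DDL assembled by the final format() call takes roughly the following shape. This is a hand-written illustration of the format arguments; the schema, table, and column names are hypothetical:

// Illustrative only: shows how the pieces of the CREATE TABLE statement combine.
String sql = String.format(
        "CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s",
        "",                                         // "IMMUTABLE" only when immutable_rows = true
        "\"tpch\".\"orders\"",                      // escaped schema.table (hypothetical)
        "ROWKEY bigint not null, \"col_a\" bigint", // column list; ROWKEY is synthesized above
        "ROWKEY",                                   // primary-key column names
        "SALT_BUCKETS=10");                         // table options, if any were set
// => CREATE  TABLE "tpch"."orders" (ROWKEY bigint not null, "col_a" bigint , CONSTRAINT PK PRIMARY KEY (ROWKEY)) SALT_BUCKETS=10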
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class BaseRaptorConnectorTest, method testTablesSystemTable.
@Test
public void testTablesSystemTable() {
    assertUpdate("CREATE TABLE system_tables_test0 (c00 timestamp, c01 varchar, c02 double, c03 bigint, c04 bigint)");
    assertUpdate("CREATE TABLE system_tables_test1 (c10 timestamp, c11 varchar, c12 double, c13 bigint, c14 bigint) " +
            "WITH (temporal_column = 'c10')");
    assertUpdate("CREATE TABLE system_tables_test2 (c20 timestamp, c21 varchar, c22 double, c23 bigint, c24 bigint) " +
            "WITH (temporal_column = 'c20', ordering = ARRAY['c22', 'c21'])");
    assertUpdate("CREATE TABLE system_tables_test3 (c30 timestamp, c31 varchar, c32 double, c33 bigint, c34 bigint) " +
            "WITH (temporal_column = 'c30', bucket_count = 40, bucketed_on = ARRAY ['c34', 'c33'])");
    assertUpdate("CREATE TABLE system_tables_test4 (c40 timestamp, c41 varchar, c42 double, c43 bigint, c44 bigint) " +
            "WITH (temporal_column = 'c40', ordering = ARRAY['c41', 'c42'], distribution_name = 'test_distribution', bucket_count = 50, bucketed_on = ARRAY ['c43', 'c44'])");
    assertUpdate("CREATE TABLE system_tables_test5 (c50 timestamp, c51 varchar, c52 double, c53 bigint, c54 bigint) " +
            "WITH (ordering = ARRAY['c51', 'c52'], distribution_name = 'test_distribution', bucket_count = 50, bucketed_on = ARRAY ['c53', 'c54'], organized = true)");

    MaterializedResult actualResults = computeActual("SELECT * FROM system.tables");
    assertEquals(actualResults.getTypes(), ImmutableList.builder()
            .add(VARCHAR) // table_schema
            .add(VARCHAR) // table_name
            .add(VARCHAR) // temporal_column
            .add(new ArrayType(VARCHAR)) // ordering_columns
            .add(VARCHAR) // distribution_name
            .add(BIGINT) // bucket_count
            .add(new ArrayType(VARCHAR)) // bucket_columns
            .add(BOOLEAN) // organized
            .build());
    Map<String, MaterializedRow> map = actualResults.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
            .collect(toImmutableMap(row -> ((String) row.getField(1)), identity()));
    assertEquals(map.size(), 6);
    assertEquals(map.get("system_tables_test0").getFields(), asList("tpch", "system_tables_test0", null, null, null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test1").getFields(), asList("tpch", "system_tables_test1", "c10", null, null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test2").getFields(), asList("tpch", "system_tables_test2", "c20", ImmutableList.of("c22", "c21"), null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test3").getFields(), asList("tpch", "system_tables_test3", "c30", null, null, 40L, ImmutableList.of("c34", "c33"), Boolean.FALSE));
    assertEquals(map.get("system_tables_test4").getFields(), asList("tpch", "system_tables_test4", "c40", ImmutableList.of("c41", "c42"), "test_distribution", 50L, ImmutableList.of("c43", "c44"), Boolean.FALSE));
    assertEquals(map.get("system_tables_test5").getFields(), asList("tpch", "system_tables_test5", null, ImmutableList.of("c51", "c52"), "test_distribution", 50L, ImmutableList.of("c53", "c54"), Boolean.TRUE));

    actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch'");
    long actualRowCount = actualResults.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
            .count();
    assertEquals(actualRowCount, 6);
    actualResults = computeActual("SELECT * FROM system.tables WHERE table_name = 'system_tables_test3'");
    assertEquals(actualResults.getMaterializedRows().size(), 1);
    actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
    assertEquals(actualResults.getMaterializedRows().size(), 1);
    actualResults = computeActual("SELECT distribution_name, bucket_count, bucketing_columns, ordering_columns, temporal_column, organized " +
            "FROM system.tables " +
            "WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
    assertEquals(actualResults.getTypes(), ImmutableList.of(VARCHAR, BIGINT, new ArrayType(VARCHAR), new ArrayType(VARCHAR), VARCHAR, BOOLEAN));
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    assertUpdate("DROP TABLE system_tables_test0");
    assertUpdate("DROP TABLE system_tables_test1");
    assertUpdate("DROP TABLE system_tables_test2");
    assertUpdate("DROP TABLE system_tables_test3");
    assertUpdate("DROP TABLE system_tables_test4");
    assertUpdate("DROP TABLE system_tables_test5");
    assertEquals(computeActual("SELECT * FROM system.tables WHERE table_schema IN ('foo', 'bar')").getRowCount(), 0);
}
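The asList assertions above follow the eight-column layout verified by the type check. A small sketch of reading a system.tables row by position (the helper name is made up for illustration):

import io.trino.testing.MaterializedRow;

// Field layout per the type assertions above:
// 0=table_schema, 1=table_name, 2=temporal_column, 3=ordering_columns,
// 4=distribution_name, 5=bucket_count, 6=bucket_columns, 7=organized
static String describeTable(MaterializedRow row) {
    return String.format("%s.%s (temporal=%s, buckets=%s)",
            row.getField(0), row.getField(1), row.getField(2), row.getField(5));
}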
Use of io.trino.spi.type.BigintType.BIGINT in project trino by trinodb.
From the class TestIcebergProjectionPushdownPlans, method testDereferencePushdown.
@Test
public void testDereferencePushdown() {
    String testTable = "test_simple_projection_pushdown" + randomTableSuffix();
    QualifiedObjectName completeTableName = new QualifiedObjectName(CATALOG, SCHEMA, testTable);
    getQueryRunner().execute(format(
            "CREATE TABLE %s (col0, col1) WITH (partitioning = ARRAY['col1']) AS" +
                    " SELECT CAST(row(5, 6) AS row(x bigint, y bigint)) AS col0, 5 AS col1 WHERE false",
            testTable));
    Session session = getQueryRunner().getDefaultSession();
    Optional<TableHandle> tableHandle = getTableHandle(session, completeTableName);
    assertTrue(tableHandle.isPresent(), "expected the table handle to be present");
    Map<String, ColumnHandle> columns = getColumnHandles(session, completeTableName);
    IcebergColumnHandle column0Handle = (IcebergColumnHandle) columns.get("col0");
    IcebergColumnHandle column1Handle = (IcebergColumnHandle) columns.get("col1");
    // Handles for the nested fields col0.x and col0.y, addressed by field-id path
    IcebergColumnHandle columnX = new IcebergColumnHandle(column0Handle.getColumnIdentity(), column0Handle.getType(), ImmutableList.of(column0Handle.getColumnIdentity().getChildren().get(0).getId()), BIGINT, Optional.empty());
    IcebergColumnHandle columnY = new IcebergColumnHandle(column0Handle.getColumnIdentity(), column0Handle.getType(), ImmutableList.of(column0Handle.getColumnIdentity().getChildren().get(1).getId()), BIGINT, Optional.empty());

    // Simple projection pushdown
    assertPlan(
            "SELECT col0.x expr_x, col0.y expr_y FROM " + testTable,
            any(tableScan(
                    equalTo(((IcebergTableHandle) tableHandle.get().getConnectorHandle()).withProjectedColumns(Set.of(columnX, columnY))),
                    TupleDomain.all(),
                    ImmutableMap.of("col0#x", equalTo(columnX), "col0#y", equalTo(columnY)))));

    // Projection and predicate pushdown
    assertPlan(
            format("SELECT col0.x FROM %s WHERE col0.x = col1 + 3 and col0.y = 2", testTable),
            anyTree(filter(
                    "y = BIGINT '2' AND (x = CAST((col1 + 3) AS BIGINT))",
                    tableScan(
                            table -> {
                                IcebergTableHandle icebergTableHandle = (IcebergTableHandle) table;
                                TupleDomain<IcebergColumnHandle> unenforcedConstraint = icebergTableHandle.getUnenforcedPredicate();
                                return icebergTableHandle.getProjectedColumns().equals(ImmutableSet.of(column1Handle, columnX, columnY))
                                        && unenforcedConstraint.equals(TupleDomain.withColumnDomains(ImmutableMap.of(columnY, Domain.singleValue(BIGINT, 2L))));
                            },
                            TupleDomain.all(),
                            ImmutableMap.of("y", equalTo(columnY), "x", equalTo(columnX), "col1", equalTo(column1Handle))))));

    // Projection and predicate pushdown with overlapping columns
    assertPlan(
            format("SELECT col0, col0.y expr_y FROM %s WHERE col0.x = 5", testTable),
            anyTree(filter(
                    "x = BIGINT '5'",
                    tableScan(
                            table -> {
                                IcebergTableHandle icebergTableHandle = (IcebergTableHandle) table;
                                TupleDomain<IcebergColumnHandle> unenforcedConstraint = icebergTableHandle.getUnenforcedPredicate();
                                return icebergTableHandle.getProjectedColumns().equals(ImmutableSet.of(column0Handle, columnX))
                                        && unenforcedConstraint.equals(TupleDomain.withColumnDomains(ImmutableMap.of(columnX, Domain.singleValue(BIGINT, 5L))));
                            },
                            TupleDomain.all(),
                            ImmutableMap.of("col0", equalTo(column0Handle), "x", equalTo(columnX))))));

    // Projection and predicate pushdown with joins
    assertPlan(
            format("SELECT T.col0.x, T.col0, T.col0.y FROM %s T join %s S on T.col1 = S.col1 WHERE (T.col0.x = 2)", testTable, testTable),
            anyTree(project(
                    ImmutableMap.of("expr_0_x", expression("expr_0[1]"), "expr_0", expression("expr_0"), "expr_0_y", expression("expr_0[2]")),
                    join(
                            INNER,
                            ImmutableList.of(equiJoinClause("t_expr_1", "s_expr_1")),
                            anyTree(filter(
                                    "x = BIGINT '2'",
                                    tableScan(
                                            table -> {
                                                IcebergTableHandle icebergTableHandle = (IcebergTableHandle) table;
                                                TupleDomain<IcebergColumnHandle> unenforcedConstraint = icebergTableHandle.getUnenforcedPredicate();
                                                Set<IcebergColumnHandle> expectedProjections = ImmutableSet.of(column0Handle, column1Handle, columnX);
                                                TupleDomain<IcebergColumnHandle> expectedUnenforcedConstraint = TupleDomain.withColumnDomains(ImmutableMap.of(columnX, Domain.singleValue(BIGINT, 2L)));
                                                return icebergTableHandle.getProjectedColumns().equals(expectedProjections)
                                                        && unenforcedConstraint.equals(expectedUnenforcedConstraint);
                                            },
                                            TupleDomain.all(),
                                            ImmutableMap.of("x", equalTo(columnX), "expr_0", equalTo(column0Handle), "t_expr_1", equalTo(column1Handle))))),
                            anyTree(tableScan(
                                    equalTo(((IcebergTableHandle) tableHandle.get().getConnectorHandle()).withProjectedColumns(Set.of(column1Handle))),
                                    TupleDomain.all(),
                                    ImmutableMap.of("s_expr_1", equalTo(column1Handle))))))));
}
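A quick way to observe the same pushdown interactively is to EXPLAIN one of the queries and inspect the table scan for the projected nested fields. A sketch, assuming the same query runner and catalog wiring as the test above:

// Sketch: print the plan and check that the scan lists col0.x rather than all of col0
MaterializedResult plan = getQueryRunner().execute(
        "EXPLAIN SELECT col0.x FROM " + testTable + " WHERE col0.y = 2");
System.out.println(plan.getOnlyValue());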