Usage of io.trino.spi.connector.ConnectorTableMetadata in the Trino project (trinodb). Example: the beginCreateTable method of the KuduMetadata class.
@Override
public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorTableLayout> layout) {
    // Kudu tables require a primary key and a partitioning scheme. When the DDL
    // declares no partitions, synthesize a hidden VARCHAR row-id primary key and
    // hash-partition the table on it.
    PartitionDesign partitionDesign = KuduTableProperties.getPartitionDesign(tableMetadata.getProperties());
    boolean generateUUID = !partitionDesign.hasPartitions();

    ConnectorTableMetadata effectiveMetadata = tableMetadata;
    if (generateUUID) {
        String rowIdName = KuduColumnHandle.ROW_ID;

        Map<String, Object> rowIdColumnProperties = new HashMap<>();
        rowIdColumnProperties.put(KuduTableProperties.PRIMARY_KEY, true);
        ColumnMetadata rowIdColumn = ColumnMetadata.builder()
                .setName(rowIdName)
                .setType(VarcharType.VARCHAR)
                .setComment(Optional.of("key=true"))
                .setHidden(true)
                .setProperties(rowIdColumnProperties)
                .build();

        // Hidden row-id goes first, followed by all user-declared columns.
        List<ColumnMetadata> augmentedColumns = new ArrayList<>();
        augmentedColumns.add(rowIdColumn);
        augmentedColumns.addAll(tableMetadata.getColumns());

        Map<String, Object> augmentedProperties = new HashMap<>(tableMetadata.getProperties());
        augmentedProperties.put(KuduTableProperties.PARTITION_BY_HASH_COLUMNS, ImmutableList.of(rowIdName));
        augmentedProperties.put(KuduTableProperties.PARTITION_BY_HASH_BUCKETS, 2);

        effectiveMetadata = new ConnectorTableMetadata(
                tableMetadata.getTable(),
                ImmutableList.copyOf(augmentedColumns),
                ImmutableMap.copyOf(augmentedProperties),
                tableMetadata.getComment());
    }

    KuduTable table = clientSession.createTable(effectiveMetadata, false);
    Schema schema = table.getSchema();

    // Types as stored by Kudu vs. the types declared in the (possibly augmented) metadata.
    List<Type> kuduColumnTypes = schema.getColumns().stream()
            .map(TypeHelper::fromKuduColumn)
            .collect(toImmutableList());
    List<Type> declaredColumnTypes = effectiveMetadata.getColumns().stream()
            .map(ColumnMetadata::getType)
            .collect(toImmutableList());

    return new KuduOutputTableHandle(effectiveMetadata.getTable(), declaredColumnTypes, kuduColumnTypes, generateUUID, table);
}
Usage of io.trino.spi.connector.ConnectorTableMetadata in the Trino project (trinodb). Example: the beginCreateTable method of the PhoenixClient class.
/**
 * Creates the Phoenix table described by {@code tableMetadata} and returns a handle
 * used to write the initial data. If the DDL declares no primary-key columns, a
 * synthetic {@code ROWKEY} bigint column (backed by a Phoenix sequence) is prepended.
 *
 * @throws SchemaNotFoundException if the target schema does not exist
 * @throws TrinoException with {@code ALREADY_EXISTS} if the table already exists,
 *         or {@code PHOENIX_METADATA_ERROR} on any other SQL failure
 */
@Override
public JdbcOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schema = schemaTableName.getSchemaName();
    String table = schemaTableName.getTableName();
    if (!getSchemaNames(session).contains(schema)) {
        throw new SchemaNotFoundException(schema);
    }
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        // Map Trino identifiers to the remote (case-sensitive) Phoenix names before building DDL.
        schema = getIdentifierMapping().toRemoteSchemaName(identity, connection, schema);
        table = getIdentifierMapping().toRemoteTableName(identity, connection, schema, table);
        schema = toPhoenixSchemaName(schema);
        // Program to the List interface; ArrayList instead of LinkedList — the list is
        // only copied and iterated, so a linked list buys nothing.
        List<ColumnMetadata> tableColumns = new ArrayList<>(tableMetadata.getColumns());
        Map<String, Object> tableProperties = tableMetadata.getProperties();
        // Optional.orElse(false) replaces the isPresent()/get() pair.
        String immutable = PhoenixTableProperties.getImmutableRows(tableProperties).orElse(false) ? "IMMUTABLE" : "";
        ImmutableList.Builder<String> columnNames = ImmutableList.builder();
        ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
        ImmutableList.Builder<String> columnList = ImmutableList.builder();
        Set<ColumnMetadata> rowkeyColumns = tableColumns.stream().filter(col -> isPrimaryKey(col, tableProperties)).collect(toSet());
        ImmutableList.Builder<String> pkNames = ImmutableList.builder();
        Optional<String> rowkeyColumn = Optional.empty();
        if (rowkeyColumns.isEmpty()) {
            // Add a rowkey when not specified in DDL.
            // NOTE(review): the sequence is created before the table; if CREATE TABLE
            // later fails, the sequence is left behind — confirm whether cleanup is expected.
            columnList.add(ROWKEY + " bigint not null");
            pkNames.add(ROWKEY);
            execute(session, format("CREATE SEQUENCE %s", getEscapedTableName(schema, table + "_sequence")));
            rowkeyColumn = Optional.of(ROWKEY);
        }
        for (ColumnMetadata column : tableColumns) {
            String columnName = getIdentifierMapping().toRemoteColumnName(connection, column.getName());
            columnNames.add(columnName);
            columnTypes.add(column.getType());
            String typeStatement = toWriteMapping(session, column.getType()).getDataType();
            if (rowkeyColumns.contains(column)) {
                // Phoenix primary-key columns must be declared NOT NULL.
                typeStatement += " not null";
                pkNames.add(columnName);
            }
            columnList.add(format("%s %s", getEscapedArgument(columnName), typeStatement));
        }
        // Translate each Trino table property to its Phoenix/HBase table option clause.
        ImmutableList.Builder<String> tableOptions = ImmutableList.builder();
        PhoenixTableProperties.getSaltBuckets(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.SALT_BUCKETS + "=" + value));
        PhoenixTableProperties.getSplitOn(tableProperties).ifPresent(value -> tableOptions.add("SPLIT ON (" + value.replace('"', '\'') + ")"));
        PhoenixTableProperties.getDisableWal(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DISABLE_WAL + "=" + value));
        PhoenixTableProperties.getDefaultColumnFamily(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DEFAULT_COLUMN_FAMILY + "=" + value));
        PhoenixTableProperties.getBloomfilter(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.BLOOMFILTER + "='" + value + "'"));
        PhoenixTableProperties.getVersions(tableProperties).ifPresent(value -> tableOptions.add(HConstants.VERSIONS + "=" + value));
        PhoenixTableProperties.getMinVersions(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.MIN_VERSIONS + "=" + value));
        PhoenixTableProperties.getCompression(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.COMPRESSION + "='" + value + "'"));
        PhoenixTableProperties.getTimeToLive(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.TTL + "=" + value));
        PhoenixTableProperties.getDataBlockEncoding(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + value + "'"));
        String sql = format("CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s", immutable, getEscapedTableName(schema, table), join(", ", columnList.build()), join(", ", pkNames.build()), join(", ", tableOptions.build()));
        execute(session, sql);
        return new PhoenixOutputTableHandle(schema, table, columnNames.build(), columnTypes.build(), Optional.empty(), rowkeyColumn);
    }
    catch (SQLException e) {
        if (e.getErrorCode() == SQLExceptionCode.TABLE_ALREADY_EXIST.getErrorCode()) {
            throw new TrinoException(ALREADY_EXISTS, "Phoenix table already exists", e);
        }
        throw new TrinoException(PHOENIX_METADATA_ERROR, "Error creating Phoenix table", e);
    }
}
Usage of io.trino.spi.connector.ConnectorTableMetadata in the Trino project (trinodb). Example: the listTableColumns method of the PinotMetadata class.
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix) {
    requireNonNull(prefix, "prefix is null");
    // Collect the column metadata for every table matching the prefix.
    ImmutableMap.Builder<SchemaTableName, List<ColumnMetadata>> result = ImmutableMap.builder();
    listTables(session, prefix).forEach(table -> {
        ConnectorTableMetadata metadata = getTableMetadata(table);
        // A table may be dropped concurrently while we enumerate; skip it in that case.
        if (metadata != null) {
            result.put(table, metadata.getColumns());
        }
    });
    return result.buildOrThrow();
}
Usage of io.trino.spi.connector.ConnectorTableMetadata in the Trino project (trinodb). Example: the getTableMetadata method of the RaptorMetadata class.
/**
 * Builds ConnectorTableMetadata for a Raptor table: the user columns loaded from the
 * metadata DAO plus hidden shard/bucket system columns, and table properties
 * reconstructed from the stored temporal, bucketing, ordering, distribution, and
 * organization settings.
 */
@Override
public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle tableHandle) {
RaptorTableHandle handle = (RaptorTableHandle) tableHandle;
SchemaTableName tableName = new SchemaTableName(handle.getSchemaName(), handle.getTableName());
List<TableColumn> tableColumns = dao.listTableColumns(handle.getTableId());
// An empty column list is treated as the table not existing.
if (tableColumns.isEmpty()) {
throw new TableNotFoundException(tableName);
}
ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();
// Sorted by ordinal so the bucketing/ordering column lists come out in declared order.
SortedMap<Integer, String> bucketing = new TreeMap<>();
SortedMap<Integer, String> ordering = new TreeMap<>();
for (TableColumn column : tableColumns) {
if (column.isTemporal()) {
properties.put(TEMPORAL_COLUMN_PROPERTY, column.getColumnName());
}
column.getBucketOrdinal().ifPresent(bucketOrdinal -> bucketing.put(bucketOrdinal, column.getColumnName()));
column.getSortOrdinal().ifPresent(sortOrdinal -> ordering.put(sortOrdinal, column.getColumnName()));
}
// Only emit the list-valued properties when at least one column participates.
if (!bucketing.isEmpty()) {
properties.put(BUCKETED_ON_PROPERTY, ImmutableList.copyOf(bucketing.values()));
}
if (!ordering.isEmpty()) {
properties.put(ORDERING_PROPERTY, ImmutableList.copyOf(ordering.values()));
}
handle.getBucketCount().ifPresent(bucketCount -> properties.put(BUCKET_COUNT_PROPERTY, bucketCount));
handle.getDistributionName().ifPresent(distributionName -> properties.put(DISTRIBUTION_NAME_PROPERTY, distributionName));
// Only display organization property if set
if (handle.isOrganized()) {
properties.put(ORGANIZED_PROPERTY, true);
}
// Mutable copy so the hidden system columns can be appended after the user columns.
List<ColumnMetadata> columns = tableColumns.stream().map(TableColumn::toColumnMetadata).collect(toCollection(ArrayList::new));
columns.add(hiddenColumn(SHARD_UUID_COLUMN_NAME, SHARD_UUID_COLUMN_TYPE));
if (handle.isBucketed()) {
columns.add(hiddenColumn(BUCKET_NUMBER_COLUMN_NAME, INTEGER));
}
return new ConnectorTableMetadata(tableName, columns, properties.buildOrThrow());
}
Usage of io.trino.spi.connector.ConnectorTableMetadata in the Trino project (trinodb). Example: the testGetTableMetadata method of the TestPrometheusIntegrationSchema class.
@Test
public void testGetTableMetadata() {
    PrometheusMetadata metadata = new PrometheusMetadata(client);

    // Known table: verify both the table name and the complete column list.
    ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(SESSION, RUNTIME_DETERMINED_TABLE_HANDLE);
    assertEquals(tableMetadata.getTable(), new SchemaTableName("default", "up"));
    assertEquals(tableMetadata.getColumns(), ImmutableList.of(
            new ColumnMetadata("labels", varcharMapType),
            new ColumnMetadata("timestamp", TIMESTAMP_COLUMN_TYPE),
            new ColumnMetadata("value", DOUBLE)));

    // Unknown schema and/or table names must yield null metadata.
    assertNull(metadata.getTableMetadata(SESSION, new PrometheusTableHandle("unknown", "unknown")));
    assertNull(metadata.getTableMetadata(SESSION, new PrometheusTableHandle("default", "unknown")));
    assertNull(metadata.getTableMetadata(SESSION, new PrometheusTableHandle("unknown", "numbers")));
}
Aggregations