Use of io.trino.spi.security.ConnectorIdentity in project trino by trinodb.
Class BaseJdbcClient, method createTable.
protected JdbcOutputTableHandle createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, String targetTableName)
        throws SQLException
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    ConnectorIdentity identity = session.getIdentity();
    if (!getSchemaNames(session).contains(schemaTableName.getSchemaName())) {
        throw new TrinoException(NOT_FOUND, "Schema not found: " + schemaTableName.getSchemaName());
    }
    try (Connection connection = connectionFactory.openConnection(session)) {
        String remoteSchema = identifierMapping.toRemoteSchemaName(identity, connection, schemaTableName.getSchemaName());
        String remoteTable = identifierMapping.toRemoteTableName(identity, connection, remoteSchema, schemaTableName.getTableName());
        String remoteTargetTableName = identifierMapping.toRemoteTableName(identity, connection, remoteSchema, targetTableName);
        String catalog = connection.getCatalog();

        ImmutableList.Builder<String> columnNames = ImmutableList.builder();
        ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
        ImmutableList.Builder<String> columnList = ImmutableList.builder();
        for (ColumnMetadata column : tableMetadata.getColumns()) {
            String columnName = identifierMapping.toRemoteColumnName(connection, column.getName());
            columnNames.add(columnName);
            columnTypes.add(column.getType());
            columnList.add(getColumnDefinitionSql(session, column, columnName));
        }

        RemoteTableName remoteTableName = new RemoteTableName(Optional.ofNullable(catalog), Optional.ofNullable(remoteSchema), remoteTargetTableName);
        String sql = createTableSql(remoteTableName, columnList.build(), tableMetadata);
        execute(connection, sql);

        return new JdbcOutputTableHandle(catalog, remoteSchema, remoteTable, columnNames.build(), columnTypes.build(), Optional.empty(), remoteTargetTableName);
    }
}
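The ConnectorIdentity obtained from session.getIdentity() above is what ties remote-name resolution to the calling Trino user; it also carries group membership and extra credentials supplied with the query. A minimal sketch of reading those fields using only the public SPI (the "jdbc.user" credential key is hypothetical, chosen here for illustration):

import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.security.ConnectorIdentity;

import java.util.Optional;

final class IdentityExample
{
    private IdentityExample() {}

    // Resolve a per-user remote credential, falling back to a shared default.
    static String resolveRemoteUser(ConnectorSession session, String defaultUser)
    {
        ConnectorIdentity identity = session.getIdentity();
        // "jdbc.user" is an illustrative extra-credential key, not a Trino constant
        return Optional.ofNullable(identity.getExtraCredentials().get("jdbc.user"))
                .orElse(defaultUser);
    }
}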
Use of io.trino.spi.security.ConnectorIdentity in project trino by trinodb.
Class BaseJdbcClient, method createSchema.
@Override
public void createSchema(ConnectorSession session, String schemaName)
{
    ConnectorIdentity identity = session.getIdentity();
    try (Connection connection = connectionFactory.openConnection(session)) {
        schemaName = identifierMapping.toRemoteSchemaName(identity, connection, schemaName);
        execute(connection, createSchemaSql(schemaName));
    }
    catch (SQLException e) {
        throw new TrinoException(JDBC_ERROR, e);
    }
}
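createSchemaSql is a small SQL builder that subclasses can override for dialect differences. A sketch of the kind of statement it produces, assuming double-quote identifier quoting (both the quoting rule and the helper are illustrative, not BaseJdbcClient's actual implementation):

final class SchemaSqlExample
{
    private SchemaSqlExample() {}

    static String createSchemaSql(String remoteSchemaName)
    {
        // e.g. CREATE SCHEMA "sales"
        return "CREATE SCHEMA " + quoted(remoteSchemaName);
    }

    static String quoted(String name)
    {
        return "\"" + name.replace("\"", "\"\"") + "\"";
    }
}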
Use of io.trino.spi.security.ConnectorIdentity in project trino by trinodb.
Class BaseJdbcClient, method dropSchema.
@Override
public void dropSchema(ConnectorSession session, String schemaName)
{
    ConnectorIdentity identity = session.getIdentity();
    try (Connection connection = connectionFactory.openConnection(session)) {
        schemaName = identifierMapping.toRemoteSchemaName(identity, connection, schemaName);
        execute(connection, dropSchemaSql(schemaName));
    }
    catch (SQLException e) {
        throw new TrinoException(JDBC_ERROR, e);
    }
}
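Both createSchema and dropSchema pass the identity and the open Connection to toRemoteSchemaName because the remote database may fold unquoted identifiers to a different case than Trino's lowercase names. A simplified sketch of that kind of case mapping using plain JDBC metadata (Trino's IdentifierMapping is more involved; this is illustrative only):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import java.util.Locale;

final class RemoteNameExample
{
    private RemoteNameExample() {}

    static String toRemoteName(Connection connection, String trinoName)
            throws SQLException
    {
        DatabaseMetaData metadata = connection.getMetaData();
        if (metadata.storesUpperCaseIdentifiers()) {
            return trinoName.toUpperCase(Locale.ENGLISH);
        }
        if (metadata.storesLowerCaseIdentifiers()) {
            return trinoName.toLowerCase(Locale.ENGLISH);
        }
        return trinoName;
    }
}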
Use of io.trino.spi.security.ConnectorIdentity in project trino by trinodb.
Class PhoenixClient, method beginCreateTable.
@Override
public JdbcOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schema = schemaTableName.getSchemaName();
    String table = schemaTableName.getTableName();
    if (!getSchemaNames(session).contains(schema)) {
        throw new SchemaNotFoundException(schema);
    }
    try (Connection connection = connectionFactory.openConnection(session)) {
        ConnectorIdentity identity = session.getIdentity();
        schema = getIdentifierMapping().toRemoteSchemaName(identity, connection, schema);
        table = getIdentifierMapping().toRemoteTableName(identity, connection, schema, table);
        schema = toPhoenixSchemaName(schema);
        LinkedList<ColumnMetadata> tableColumns = new LinkedList<>(tableMetadata.getColumns());
        Map<String, Object> tableProperties = tableMetadata.getProperties();
        Optional<Boolean> immutableRows = PhoenixTableProperties.getImmutableRows(tableProperties);
        String immutable = immutableRows.isPresent() && immutableRows.get() ? "IMMUTABLE" : "";

        ImmutableList.Builder<String> columnNames = ImmutableList.builder();
        ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();
        ImmutableList.Builder<String> columnList = ImmutableList.builder();
        Set<ColumnMetadata> rowkeyColumns = tableColumns.stream()
                .filter(col -> isPrimaryKey(col, tableProperties))
                .collect(toSet());
        ImmutableList.Builder<String> pkNames = ImmutableList.builder();
        Optional<String> rowkeyColumn = Optional.empty();
        if (rowkeyColumns.isEmpty()) {
            // Add a rowkey when not specified in DDL
            columnList.add(ROWKEY + " bigint not null");
            pkNames.add(ROWKEY);
            execute(session, format("CREATE SEQUENCE %s", getEscapedTableName(schema, table + "_sequence")));
            rowkeyColumn = Optional.of(ROWKEY);
        }
        for (ColumnMetadata column : tableColumns) {
            String columnName = getIdentifierMapping().toRemoteColumnName(connection, column.getName());
            columnNames.add(columnName);
            columnTypes.add(column.getType());
            String typeStatement = toWriteMapping(session, column.getType()).getDataType();
            if (rowkeyColumns.contains(column)) {
                typeStatement += " not null";
                pkNames.add(columnName);
            }
            columnList.add(format("%s %s", getEscapedArgument(columnName), typeStatement));
        }

        ImmutableList.Builder<String> tableOptions = ImmutableList.builder();
        PhoenixTableProperties.getSaltBuckets(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.SALT_BUCKETS + "=" + value));
        PhoenixTableProperties.getSplitOn(tableProperties).ifPresent(value -> tableOptions.add("SPLIT ON (" + value.replace('"', '\'') + ")"));
        PhoenixTableProperties.getDisableWal(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DISABLE_WAL + "=" + value));
        PhoenixTableProperties.getDefaultColumnFamily(tableProperties).ifPresent(value -> tableOptions.add(TableProperty.DEFAULT_COLUMN_FAMILY + "=" + value));
        PhoenixTableProperties.getBloomfilter(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.BLOOMFILTER + "='" + value + "'"));
        PhoenixTableProperties.getVersions(tableProperties).ifPresent(value -> tableOptions.add(HConstants.VERSIONS + "=" + value));
        PhoenixTableProperties.getMinVersions(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.MIN_VERSIONS + "=" + value));
        PhoenixTableProperties.getCompression(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.COMPRESSION + "='" + value + "'"));
        PhoenixTableProperties.getTimeToLive(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.TTL + "=" + value));
        PhoenixTableProperties.getDataBlockEncoding(tableProperties).ifPresent(value -> tableOptions.add(HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + value + "'"));

        String sql = format(
                "CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s",
                immutable,
                getEscapedTableName(schema, table),
                join(", ", columnList.build()),
                join(", ", pkNames.build()),
                join(", ", tableOptions.build()));
        execute(session, sql);

        return new PhoenixOutputTableHandle(schema, table, columnNames.build(), columnTypes.build(), Optional.empty(), rowkeyColumn);
    }
    catch (SQLException e) {
        if (e.getErrorCode() == SQLExceptionCode.TABLE_ALREADY_EXIST.getErrorCode()) {
            throw new TrinoException(ALREADY_EXISTS, "Phoenix table already exists", e);
        }
        throw new TrinoException(PHOENIX_METADATA_ERROR, "Error creating Phoenix table", e);
    }
}
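To make the generated DDL concrete, here is a small runnable sketch that mirrors the final format() call above with hypothetical column, key, and option values (the quoting and the sample values are illustrative, not taken from a real Phoenix table):

import java.util.List;

import static java.lang.String.format;
import static java.lang.String.join;

final class PhoenixDdlExample
{
    private PhoenixDdlExample() {}

    public static void main(String[] args)
    {
        List<String> columnList = List.of("\"id\" bigint not null", "\"name\" varchar");
        List<String> pkNames = List.of("\"id\"");
        List<String> tableOptions = List.of("SALT_BUCKETS=4", "TTL=86400");
        String sql = format(
                "CREATE %s TABLE %s (%s , CONSTRAINT PK PRIMARY KEY (%s)) %s",
                "IMMUTABLE",
                "\"test_schema\".\"orders\"",
                join(", ", columnList),
                join(", ", pkNames),
                join(", ", tableOptions));
        // Prints (wrapped here for readability):
        // CREATE IMMUTABLE TABLE "test_schema"."orders" ("id" bigint not null, "name" varchar ,
        //     CONSTRAINT PK PRIMARY KEY ("id")) SALT_BUCKETS=4, TTL=86400
        System.out.println(sql);
    }
}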
Use of io.trino.spi.security.ConnectorIdentity in project trino by trinodb.
Class IcebergPageSourceProvider, method createParquetPageSource.
private static ReaderPageSource createParquetPageSource(
        HdfsEnvironment hdfsEnvironment,
        ConnectorIdentity identity,
        Configuration configuration,
        Path path,
        long start,
        long length,
        long fileSize,
        List<IcebergColumnHandle> regularColumns,
        ParquetReaderOptions options,
        TupleDomain<IcebergColumnHandle> effectivePredicate,
        FileFormatDataSourceStats fileFormatDataSourceStats,
        Optional<NameMapping> nameMapping)
{
    AggregatedMemoryContext memoryContext = newSimpleAggregatedMemoryContext();
    ParquetDataSource dataSource = null;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(identity, path, configuration);
        FSDataInputStream inputStream = hdfsEnvironment.doAs(identity, () -> fileSystem.open(path));
        dataSource = new HdfsParquetDataSource(new ParquetDataSourceId(path.toString()), fileSize, inputStream, fileFormatDataSourceStats, options);
        // extra variable required for lambda below
        ParquetDataSource theDataSource = dataSource;
        ParquetMetadata parquetMetadata = hdfsEnvironment.doAs(identity, () -> MetadataReader.readFooter(theDataSource));
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        if (nameMapping.isPresent() && !ParquetSchemaUtil.hasIds(fileSchema)) {
            // NameMapping conversion is necessary because MetadataReader converts all column names to lowercase and NameMapping is case sensitive
            fileSchema = ParquetSchemaUtil.applyNameMapping(fileSchema, convertToLowercase(nameMapping.get()));
        }

        // Mapping from Iceberg field ID to Parquet fields.
        Map<Integer, org.apache.parquet.schema.Type> parquetIdToField = fileSchema.getFields().stream()
                .filter(field -> field.getId() != null)
                .collect(toImmutableMap(field -> field.getId().intValue(), Function.identity()));

        Optional<ReaderColumns> columnProjections = projectColumns(regularColumns);
        List<IcebergColumnHandle> readColumns = columnProjections
                .map(readerColumns -> (List<IcebergColumnHandle>) readerColumns.get().stream().map(IcebergColumnHandle.class::cast).collect(toImmutableList()))
                .orElse(regularColumns);

        List<org.apache.parquet.schema.Type> parquetFields = readColumns.stream()
                .map(column -> parquetIdToField.get(column.getId()))
                .collect(toList());

        MessageType requestedSchema = new MessageType(fileSchema.getName(), parquetFields.stream().filter(Objects::nonNull).collect(toImmutableList()));
        Map<List<String>, RichColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, requestedSchema);
        TupleDomain<ColumnDescriptor> parquetTupleDomain = getParquetTupleDomain(descriptorsByPath, effectivePredicate);
        Predicate parquetPredicate = buildPredicate(requestedSchema, parquetTupleDomain, descriptorsByPath, UTC);

        List<BlockMetaData> blocks = new ArrayList<>();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (start <= firstDataPage && firstDataPage < start + length && predicateMatches(parquetPredicate, block, dataSource, descriptorsByPath, parquetTupleDomain)) {
                blocks.add(block);
            }
        }

        MessageColumnIO messageColumnIO = getColumnIO(fileSchema, requestedSchema);
        ParquetReader parquetReader = new ParquetReader(Optional.ofNullable(fileMetaData.getCreatedBy()), messageColumnIO, blocks, Optional.empty(), dataSource, UTC, memoryContext, options);

        ImmutableList.Builder<Type> trinoTypes = ImmutableList.builder();
        ImmutableList.Builder<Optional<Field>> internalFields = ImmutableList.builder();
        for (int columnIndex = 0; columnIndex < readColumns.size(); columnIndex++) {
            IcebergColumnHandle column = readColumns.get(columnIndex);
            org.apache.parquet.schema.Type parquetField = parquetFields.get(columnIndex);
            Type trinoType = column.getBaseType();
            trinoTypes.add(trinoType);
            if (parquetField == null) {
                internalFields.add(Optional.empty());
            }
            else {
                // The top level columns are already mapped by name/id appropriately.
                ColumnIO columnIO = messageColumnIO.getChild(parquetField.getName());
                internalFields.add(IcebergParquetColumnIOConverter.constructField(new FieldContext(trinoType, column.getColumnIdentity()), columnIO));
            }
        }
        return new ReaderPageSource(new ParquetPageSource(parquetReader, trinoTypes.build(), internalFields.build()), columnProjections);
    }
    catch (IOException | RuntimeException e) {
        try {
            if (dataSource != null) {
                dataSource.close();
            }
        }
        catch (IOException ignored) {
        }
        if (e instanceof TrinoException) {
            throw (TrinoException) e;
        }
        String message = format("Error opening Iceberg split %s (offset=%s, length=%s): %s", path, start, length, e.getMessage());
        if (e instanceof ParquetCorruptionException) {
            throw new TrinoException(ICEBERG_BAD_DATA, message, e);
        }
        if (e instanceof BlockMissingException) {
            throw new TrinoException(ICEBERG_MISSING_DATA, message, e);
        }
        throw new TrinoException(ICEBERG_CANNOT_OPEN_SPLIT, message, e);
    }
}
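The identity is used twice in the method above: once to obtain the FileSystem and once to wrap each HDFS call in hdfsEnvironment.doAs, so reads run as the Trino user (relevant for Kerberos and impersonation). A minimal sketch of that pattern, reusing only the calls shown above (the HdfsEnvironment import location is an assumption about this Trino version):

import io.trino.plugin.hive.HdfsEnvironment;
import io.trino.spi.security.ConnectorIdentity;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

final class DoAsExample
{
    private DoAsExample() {}

    static FSDataInputStream openAsUser(
            HdfsEnvironment hdfsEnvironment,
            ConnectorIdentity identity,
            Configuration configuration,
            Path path)
            throws IOException
    {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(identity, path, configuration);
        // The action runs under the identity's Hadoop user, mirroring the doAs calls above.
        return hdfsEnvironment.doAs(identity, () -> fileSystem.open(path));
    }
}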