Use of io.prestosql.plugin.hive.metastore.Database in the project boostkit-bigdata by kunpengcompute.
From the class FileHiveMetastore, the method getAllTables:
@Override
public synchronized Optional<List<String>> getAllTables(String databaseName) {
    requireNonNull(databaseName, "databaseName is null");
    Optional<Database> database = getDatabase(databaseName);
    if (!database.isPresent()) {
        return Optional.empty();
    }
    Path databaseMetadataDirectory = getDatabaseMetadataDirectory(databaseName);
    List<String> tables = getChildSchemaDirectories(databaseMetadataDirectory).stream()
            .map(Path::getName)
            .collect(toList());
    return Optional.of(ImmutableList.copyOf(tables));
}
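A minimal caller sketch for this method; the metastore variable and the database name below are hypothetical and assume an already-constructed FileHiveMetastore instance.

// Hypothetical usage: list all tables, distinguishing a missing database
// (Optional.empty()) from a database that exists but has no tables (empty list).
Optional<List<String>> tables = metastore.getAllTables("sales_db");
if (!tables.isPresent()) {
    System.err.println("database sales_db does not exist");
} else {
    tables.get().forEach(System.out::println);
}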
Use of io.prestosql.plugin.hive.metastore.Database in the project boostkit-bigdata by kunpengcompute.
From the class HiveMetadata, the method beginCreateTable:
@Override
public HiveOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) {
    if (getExternalLocation(tableMetadata.getProperties()) != null || isExternalTable(tableMetadata.getProperties())) {
        throw new PrestoException(NOT_SUPPORTED, "External tables cannot be created using CREATE TABLE AS");
    }
    if (HiveTableProperties.getAvroSchemaUrl(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "CREATE TABLE AS not supported when Avro schema url is set");
    }
    HiveStorageFormat tableStorageFormat = HiveTableProperties.getHiveStorageFormat(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    Optional<HiveBucketProperty> bucketProperty = HiveTableProperties.getBucketProperty(tableMetadata.getProperties());
    // get the root directory for the database
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Map<String, String> tableProperties = getEmptyTableProperties(tableMetadata, bucketProperty, new HdfsContext(session, schemaName, tableName));
    List<HiveColumnHandle> columnHandles = getColumnHandles(tableMetadata, ImmutableSet.copyOf(partitionedBy), typeTranslator);
    HiveStorageFormat partitionStorageFormat = HiveSessionProperties.isRespectTableFormat(session) ? tableStorageFormat : HiveSessionProperties.getHiveStorageFormat(session);
    // unpartitioned tables ignore the partition storage format
    HiveStorageFormat actualStorageFormat = partitionedBy.isEmpty() ? tableStorageFormat : partitionStorageFormat;
    actualStorageFormat.validateColumns(columnHandles);
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new Column(column.getName(), column.getHiveType(), column.getComment()))
            .collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    Optional<String> location = getLocation(tableMetadata.getProperties());
    if (location.isPresent() && !tableCreatesWithLocationAllowed) {
        throw new PrestoException(NOT_SUPPORTED, format("Setting %s property is not allowed", LOCATION_PROPERTY));
    }
    Optional<WriteIdInfo> writeIdInfo = Optional.empty();
    if (AcidUtils.isTransactionalTable(tableProperties)) {
        // create the HiveTableHandle just to obtain writeIds
        List<HiveColumnHandle> partitionColumnHandles = partitionedBy.stream()
                .map(columnHandlesByName::get)
                .collect(toList());
        HiveTableHandle tableHandle = new HiveTableHandle(schemaName, tableName, tableProperties, partitionColumnHandles, Optional.empty());
        Optional<Long> writeId = metastore.getTableWriteId(session, tableHandle, HiveACIDWriteType.INSERT);
        if (!writeId.isPresent()) {
            throw new IllegalStateException("No validWriteIds present");
        }
        writeIdInfo = Optional.of(new WriteIdInfo(writeId.get(), writeId.get(), 0));
    }
    LocationHandle locationHandle;
    if (location.isPresent()) {
        Path path = getPath(new HdfsContext(session, schemaName, tableName), location.get(), false);
        locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, writeIdInfo, Optional.of(path), HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    } else {
        locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, writeIdInfo, Optional.empty(), HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    }
    HiveOutputTableHandle result = new HiveOutputTableHandle(
            schemaName,
            tableName,
            columnHandles,
            metastore.generatePageSinkMetadata(new HiveIdentity(session), schemaTableName),
            locationHandle,
            tableStorageFormat,
            partitionStorageFormat,
            partitionedBy,
            bucketProperty,
            session.getUser(),
            tableProperties);
    LocationService.WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), schemaTableName);
    return result;
}
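For orientation, a simplified sketch of how the engine drives this handle during CREATE TABLE AS. The glue code below is illustrative, not taken from the project; beginCreateTable and finishCreateTable are part of the ConnectorMetadata SPI, and the variable names are hypothetical.

// Illustrative CTAS flow (hypothetical glue code):
// 1. the engine asks the connector to begin the table creation,
HiveOutputTableHandle handle = hiveMetadata.beginCreateTable(session, tableMetadata, Optional.empty());
// 2. writes data through page sinks bound to the returned handle,
// 3. and finally commits the new table:
// hiveMetadata.finishCreateTable(session, handle, fragments, computedStatistics);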
Use of io.prestosql.plugin.hive.metastore.Database in the project boostkit-bigdata by kunpengcompute.
From the class SqlStandardAccessControl, the method isDatabaseOwner:
private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String databaseName) {
    // all users are "owners" of the default database
    if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(databaseName)) {
        return true;
    }
    if (isAdmin(transaction, identity)) {
        return true;
    }
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Optional<Database> databaseMetadata = metastore.getDatabase(databaseName);
    if (!databaseMetadata.isPresent()) {
        return false;
    }
    Database database = databaseMetadata.get();
    // a database can be owned by a user or role
    if (database.getOwnerType() == USER && identity.getUser().equals(database.getOwnerName())) {
        return true;
    }
    if (database.getOwnerType() == ROLE && isRoleEnabled(identity, metastore::listRoleGrants, database.getOwnerName())) {
        return true;
    }
    return false;
}
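Within the same class, public permission checks delegate to this helper. A sketch of a typical call site follows; the exact body may differ between versions of the project.

@Override
public void checkCanCreateTable(ConnectorTransactionHandle transaction, ConnectorIdentity identity, SchemaTableName tableName) {
    // only the owning user/role of the schema (or an admin) may create tables in it
    if (!isDatabaseOwner(transaction, identity, tableName.getSchemaName())) {
        denyCreateTable(tableName.toString());
    }
}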