Use of io.prestosql.spi.security.PrincipalType.USER in project hetu-core by openlookeng.
In class SqlStandardAccessControlMetadata, method grantTablePrivileges:
@Override
public void grantTablePrivileges(ConnectorSession session, SchemaTableName schemaTableName, Set<Privilege> privileges, HivePrincipal grantee, boolean grantOption)
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    // Each privilege is wrapped in a HivePrivilegeInfo whose grantor and grantee are the session user as a USER principal
    Set<HivePrivilegeInfo> hivePrivilegeInfos = privileges.stream()
            .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption,
                    new HivePrincipal(USER, session.getUser()), new HivePrincipal(USER, session.getUser())))
            .collect(toSet());
    metastore.grantTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos);
}
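For context, here is a minimal, self-contained sketch of the same mapping: a set of SPI Privilege values is turned into HivePrivilegeInfo objects whose grantor and grantee are the current user wrapped as a USER principal. The class name GrantSketch and the currentUser value are made up for illustration, and the metastore types are assumed to live in io.prestosql.plugin.hive.metastore (with toHivePrivilege statically imported from HivePrivilegeInfo), as in the snippet above.
import static io.prestosql.plugin.hive.metastore.HivePrivilegeInfo.toHivePrivilege;
import static io.prestosql.spi.security.PrincipalType.USER;
import static java.util.stream.Collectors.toSet;

import io.prestosql.plugin.hive.metastore.HivePrincipal;
import io.prestosql.plugin.hive.metastore.HivePrivilegeInfo;
import io.prestosql.spi.security.Privilege;

import java.util.EnumSet;
import java.util.Set;

public final class GrantSketch
{
    private GrantSketch() {}

    public static void main(String[] args)
    {
        // Hypothetical values standing in for session.getUser() and the requested privileges
        String currentUser = "alice";
        Set<Privilege> privileges = EnumSet.of(Privilege.SELECT, Privilege.INSERT);

        // Same shape as grantTablePrivileges above: the current user (a USER principal) is both grantor and grantee
        Set<HivePrivilegeInfo> hivePrivilegeInfos = privileges.stream()
                .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), false,
                        new HivePrincipal(USER, currentUser), new HivePrincipal(USER, currentUser)))
                .collect(toSet());

        hivePrivilegeInfos.forEach(System.out::println);
    }
}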
Use of io.prestosql.spi.security.PrincipalType.USER in project boostkit-bigdata by kunpengcompute.
In class HiveMetadata, method createTable:
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    Optional<HiveBucketProperty> bucketProperty = HiveTableProperties.getBucketProperty(tableMetadata.getProperties());
    if ((bucketProperty.isPresent() || !partitionedBy.isEmpty()) && HiveTableProperties.getAvroSchemaUrl(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "Bucketing/Partitioning columns not supported when Avro schema url is set");
    }
    List<HiveColumnHandle> columnHandles = getColumnHandles(tableMetadata, ImmutableSet.copyOf(partitionedBy), typeTranslator);
    HiveStorageFormat hiveStorageFormat = HiveTableProperties.getHiveStorageFormat(tableMetadata.getProperties());
    Map<String, String> tableProperties = getEmptyTableProperties(tableMetadata, bucketProperty, new HdfsContext(session, schemaName, tableName));
    hiveStorageFormat.validateColumns(columnHandles);
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new Column(column.getName(), column.getHiveType(), column.getComment()))
            .collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    boolean external = isExternalTable(tableMetadata.getProperties());
    String externalLocation = getExternalLocation(tableMetadata.getProperties());
    if ((external || (externalLocation != null)) && !createsOfNonManagedTablesEnabled) {
        throw new PrestoException(NOT_SUPPORTED, "Cannot create non-managed Hive table");
    }
    Path targetPath;
    Optional<String> location = getLocation(tableMetadata.getProperties());
    // User specifies the location property
    if (location.isPresent()) {
        if (!tableCreatesWithLocationAllowed) {
            throw new PrestoException(NOT_SUPPORTED, format("Setting %s property is not allowed", LOCATION_PROPERTY));
        }
        targetPath = getPath(new HdfsContext(session, schemaName, tableName), location.get(), external);
    }
    else {
        // User specifies external property, but location property is absent
        if (external) {
            throw new PrestoException(NOT_SUPPORTED, format("Cannot create external Hive table without location. Set it through '%s' property", LOCATION_PROPERTY));
        }
        // User specifies the external location property
        if (externalLocation != null) {
            external = true;
            targetPath = getPath(new HdfsContext(session, schemaName, tableName), externalLocation, true);
        }
        else {
            // Default option
            external = false;
            LocationHandle locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, Optional.empty(), Optional.empty(), HiveWriteUtils.OpertionType.CREATE_TABLE);
            targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
        }
    }
    Table table = buildTableObject(session.getQueryId(), schemaName, tableName, session.getUser(), columnHandles, hiveStorageFormat, partitionedBy, bucketProperty, tableProperties, targetPath, external, prestoVersion);
    PrincipalPrivileges principalPrivileges = MetastoreUtil.buildInitialPrivilegeSet(table.getOwner());
    HiveBasicStatistics basicStatistics = table.getPartitionColumns().isEmpty()
            ? HiveBasicStatistics.createZeroStatistics()
            : HiveBasicStatistics.createEmptyStatistics();
    metastore.createTable(session, table, principalPrivileges, Optional.empty(), ignoreExisting, new PartitionStatistics(basicStatistics, ImmutableMap.of()));
}
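The snippet above touches PrincipalType.USER only indirectly: MetastoreUtil.buildInitialPrivilegeSet(table.getOwner()) builds the initial PrincipalPrivileges for the new table, and in the prestosql hive plugin that helper wraps the table owner as a USER principal. Below is only a sketch of what such a helper can look like; the class InitialPrivilegesSketch is hypothetical, the PrincipalPrivileges(userPrivileges, rolePrivileges) constructor is assumed, and the exact privilege set used in this project may differ.
import static io.prestosql.spi.security.PrincipalType.USER;

import com.google.common.collect.ImmutableMultimap;
import io.prestosql.plugin.hive.metastore.HivePrincipal;
import io.prestosql.plugin.hive.metastore.HivePrivilegeInfo;
import io.prestosql.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege;
import io.prestosql.plugin.hive.metastore.PrincipalPrivileges;

import java.util.EnumSet;

public final class InitialPrivilegesSketch
{
    private InitialPrivilegesSketch() {}

    // Sketch: grant the table owner, as a USER principal, an initial set of privileges with grant option
    public static PrincipalPrivileges buildInitialPrivilegeSet(String tableOwner)
    {
        HivePrincipal owner = new HivePrincipal(USER, tableOwner);
        ImmutableMultimap.Builder<String, HivePrivilegeInfo> userPrivileges = ImmutableMultimap.builder();
        for (HivePrivilege privilege : EnumSet.of(HivePrivilege.SELECT, HivePrivilege.INSERT, HivePrivilege.UPDATE, HivePrivilege.DELETE)) {
            userPrivileges.put(tableOwner, new HivePrivilegeInfo(privilege, true, owner, owner));
        }
        // No role-based privileges are granted for a freshly created table
        return new PrincipalPrivileges(userPrivileges.build(), ImmutableMultimap.of());
    }
}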
Use of io.prestosql.spi.security.PrincipalType.USER in project boostkit-bigdata by kunpengcompute.
In class SqlStandardAccessControlMetadata, method revokeTablePrivileges:
@Override
public void revokeTablePrivileges(ConnectorSession session, SchemaTableName schemaTableName, Set<Privilege> privileges, HivePrincipal grantee, boolean grantOption)
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Set<HivePrivilegeInfo> hivePrivilegeInfos = privileges.stream()
            .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption,
                    new HivePrincipal(USER, session.getUser()), new HivePrincipal(USER, session.getUser())))
            .collect(toSet());
    metastore.revokeTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos);
}
Use of io.prestosql.spi.security.PrincipalType.USER in project boostkit-bigdata by kunpengcompute.
In class SqlStandardAccessControlMetadata, method grantTablePrivileges:
@Override
public void grantTablePrivileges(ConnectorSession session, SchemaTableName schemaTableName, Set<Privilege> privileges, HivePrincipal grantee, boolean grantOption)
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Set<HivePrivilegeInfo> hivePrivilegeInfos = privileges.stream()
            .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption,
                    new HivePrincipal(USER, session.getUser()), new HivePrincipal(USER, session.getUser())))
            .collect(toSet());
    metastore.grantTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos);
}
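All of the usages above sit on the connector side of the boundary: the engine describes principals with io.prestosql.spi.security types, and the Hive connector re-expresses them as HivePrincipal. A small sketch of the engine-side representation follows, assuming a PrestoPrincipal(PrincipalType, String) constructor with getType() and getName() accessors in the same package as PrincipalType; the class PrincipalSketch and the name "bob" are made up for illustration.
import io.prestosql.spi.security.PrestoPrincipal;
import io.prestosql.spi.security.PrincipalType;

public final class PrincipalSketch
{
    private PrincipalSketch() {}

    public static void main(String[] args)
    {
        // On the engine side a grantee is simply a named principal plus its type (USER or ROLE)
        PrestoPrincipal grantee = new PrestoPrincipal(PrincipalType.USER, "bob");
        System.out.println(grantee.getType() + " " + grantee.getName());
    }
}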