Example usage of io.trino.plugin.hive.metastore.Table in the Trino project (trinodb): the CachingHiveMetastore method loadPartitionsByNames.
// Loads the partitions identified by the given names from the delegate metastore.
// Every name must refer to the same table (the table of the first name); if that
// table no longer exists, each requested name maps to Optional.empty().
private Map<HivePartitionName, Optional<Partition>> loadPartitionsByNames(Iterable<? extends HivePartitionName> partitionNames)
{
    requireNonNull(partitionNames, "partitionNames is null");
    checkArgument(!Iterables.isEmpty(partitionNames), "partitionNames is empty");

    HivePartitionName first = Iterables.get(partitionNames, 0);
    HiveTableName tableName = first.getHiveTableName();

    Optional<Table> table = getTable(tableName.getDatabaseName(), tableName.getTableName());
    if (table.isEmpty()) {
        // Table disappeared: report every requested partition as absent
        return stream(partitionNames).collect(toImmutableMap(name -> name, name -> Optional.empty()));
    }

    // Validate that all names belong to the same table while collecting the raw partition names
    List<String> namesToFetch = new ArrayList<>();
    for (HivePartitionName partition : partitionNames) {
        checkArgument(partition.getHiveTableName().equals(tableName), "Expected table name %s but got %s", tableName, partition.getHiveTableName());
        namesToFetch.add(partition.getPartitionName().orElseThrow());
    }

    Map<String, Optional<Partition>> fetched = delegate.getPartitionsByNames(table.get(), namesToFetch);

    // Preserve the caller's iteration order; partitions the delegate did not return map to empty
    ImmutableMap.Builder<HivePartitionName, Optional<Partition>> result = ImmutableMap.builder();
    for (HivePartitionName partition : partitionNames) {
        result.put(partition, fetched.getOrDefault(partition.getPartitionName().orElseThrow(), Optional.empty()));
    }
    return result.buildOrThrow();
}
Example usage of io.trino.plugin.hive.metastore.Table in the Trino project (trinodb): the FileHiveMetastore method listTablePrivileges.
/**
 * Lists the privileges on the given table, optionally restricted to a single principal.
 * When no principal is given, all stored permissions are returned plus a synthetic
 * OWNERSHIP grant for the supplied table owner (if any). When a principal is given,
 * the result is that principal's stored permissions plus OWNERSHIP if the principal
 * is a USER matching the table's recorded owner.
 */
@Override
public synchronized Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<String> tableOwner, Optional<HivePrincipal> principal)
{
    Table table = getRequiredTable(databaseName, tableName);
    Path permissionsDirectory = getPermissionsDirectory(table);
    if (principal.isEmpty()) {
        Builder<HivePrivilegeInfo> privileges = ImmutableSet.<HivePrivilegeInfo>builder().addAll(readAllPermissions(permissionsDirectory));
        tableOwner.ifPresent(owner -> privileges.add(new HivePrivilegeInfo(OWNERSHIP, true, new HivePrincipal(USER, owner), new HivePrincipal(USER, owner))));
        return privileges.build();
    }
    ImmutableSet.Builder<HivePrivilegeInfo> result = ImmutableSet.builder();
    // A table without a recorded owner simply grants no ownership; previously
    // getOwner().orElseThrow() threw NoSuchElementException for such tables.
    boolean principalIsOwner = principal.get().getType() == USER
            && table.getOwner().map(owner -> owner.equals(principal.get().getName())).orElse(false);
    if (principalIsOwner) {
        result.add(new HivePrivilegeInfo(OWNERSHIP, true, principal.get(), principal.get()));
    }
    result.addAll(readPermissionsFile(getPermissionsPath(permissionsDirectory, principal.get())));
    return result.build();
}
Example usage of io.trino.plugin.hive.metastore.Table in the Trino project (trinodb): the FileHiveMetastore method addPartitions.
/**
 * Adds the given partitions to an existing managed or external table.
 * Serializes each partition's metadata up front (so validation failures happen before
 * any file is written), then writes the schema files; if any write fails, previously
 * created files are deleted on a best-effort basis before the error is rethrown.
 */
@Override
public synchronized void addPartitions(String databaseName, String tableName, List<PartitionWithStatistics> partitions)
{
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(partitions, "partitions is null");
    Table table = getRequiredTable(databaseName, tableName);
    TableType tableType = TableType.valueOf(table.getTableType());
    checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE).contains(tableType), "Invalid table type: %s", tableType);
    try {
        // Phase 1: validate and serialize everything before touching the file system
        Map<Path, byte[]> schemaFiles = new LinkedHashMap<>();
        for (PartitionWithStatistics partitionWithStatistics : partitions) {
            Partition partition = partitionWithStatistics.getPartition();
            verifiedPartition(table, partition);
            Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());
            Path schemaPath = getSchemaPath(PARTITION, partitionMetadataDirectory);
            if (metadataFileSystem.exists(schemaPath)) {
                throw new TrinoException(HIVE_METASTORE_ERROR, "Partition already exists");
            }
            byte[] schemaJson = partitionCodec.toJsonBytes(new PartitionMetadata(table, partitionWithStatistics));
            schemaFiles.put(schemaPath, schemaJson);
        }
        // Phase 2: write the schema files, rolling back created files on failure
        Set<Path> createdFiles = new LinkedHashSet<>();
        try {
            for (Entry<Path, byte[]> entry : schemaFiles.entrySet()) {
                try (OutputStream outputStream = metadataFileSystem.create(entry.getKey())) {
                    createdFiles.add(entry.getKey());
                    outputStream.write(entry.getValue());
                }
                catch (IOException e) {
                    throw new TrinoException(HIVE_METASTORE_ERROR, "Could not write partition schema", e);
                }
            }
        }
        catch (Throwable e) {
            for (Path createdFile : createdFiles) {
                try {
                    metadataFileSystem.delete(createdFile, false);
                }
                catch (IOException cleanupFailure) {
                    // Best-effort rollback: record the failure on the primary exception
                    // instead of silently dropping it.
                    e.addSuppressed(cleanupFailure);
                }
            }
            throw e;
        }
    }
    catch (IOException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
Example usage of io.trino.plugin.hive.metastore.Table in the Trino project (trinodb): the FileHiveMetastore method setTablePrivileges.
// Replaces the stored privileges of the given grantee on the given table.
// Ensures the table's permissions directory exists, then overwrites the
// grantee's permission file with the supplied privilege set.
private synchronized void setTablePrivileges(HivePrincipal grantee, String databaseName, String tableName, Collection<HivePrivilegeInfo> privileges)
{
    requireNonNull(grantee, "grantee is null");
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(privileges, "privileges is null");
    try {
        Table table = getRequiredTable(databaseName, tableName);
        Path permissionsDirectory = getPermissionsDirectory(table);
        // mkdirs may report false if the directory already exists; only fail when it is truly absent
        if (!metadataFileSystem.mkdirs(permissionsDirectory) && !metadataFileSystem.isDirectory(permissionsDirectory)) {
            throw new TrinoException(HIVE_METASTORE_ERROR, "Could not create permissions directory");
        }
        List<PermissionMetadata> permissions = new ArrayList<>();
        for (HivePrivilegeInfo privilege : privileges) {
            permissions.add(new PermissionMetadata(privilege.getHivePrivilege(), privilege.isGrantOption(), grantee));
        }
        Path permissionFilePath = getPermissionsPath(permissionsDirectory, grantee);
        writeFile("permissions", permissionFilePath, permissionsCodec, permissions, true);
    }
    catch (IOException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
Example usage of io.trino.plugin.hive.metastore.Table in the Trino project (trinodb): the FileHiveMetastore method dropPartition.
// Drops a partition of the given table, identified by its partition values.
// A missing table is treated as a no-op. With deleteData the whole partition
// directory is removed; otherwise only the schema file is deleted.
@Override
public synchronized void dropPartition(String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
{
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(partitionValues, "partitionValues is null");
    getTable(databaseName, tableName).ifPresent(table -> {
        Path metadataDirectory = getPartitionMetadataDirectory(table, partitionValues);
        if (deleteData) {
            deleteDirectoryAndSchema(PARTITION, metadataDirectory);
        }
        else {
            deleteSchemaFile(PARTITION, metadataDirectory);
        }
    });
}
Aggregations