Use of io.trino.spi.TrinoException in project trino by trinodb.
The class GlueHiveMetastore, method createDatabase.
@Override
public void createDatabase(Database database)
{
    if (database.getLocation().isEmpty() && defaultDir.isPresent()) {
        String databaseLocation = new Path(defaultDir.get(), database.getDatabaseName()).toString();
        database = Database.builder(database)
                .setLocation(Optional.of(databaseLocation))
                .build();
    }
    try {
        DatabaseInput databaseInput = GlueInputConverter.convertDatabase(database);
        stats.getCreateDatabase().call(() ->
                glueClient.createDatabase(new CreateDatabaseRequest()
                        .withCatalogId(catalogId)
                        .withDatabaseInput(databaseInput)));
    }
    catch (AlreadyExistsException e) {
        throw new SchemaAlreadyExistsException(database.getDatabaseName());
    }
    catch (AmazonServiceException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
    if (database.getLocation().isPresent()) {
        HiveWriteUtils.createDirectory(hdfsContext, hdfsEnvironment, new Path(database.getLocation().get()));
    }
}
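The catch blocks translate AWS SDK failures into Trino's error model: Glue's AlreadyExistsException becomes the connector-agnostic SchemaAlreadyExistsException, and any other AmazonServiceException is wrapped in a TrinoException with the HIVE_METASTORE_ERROR code. A minimal sketch of a caller relying on that translation; the metastore variable and the prebuilt database value are assumptions for illustration:

// Hypothetical caller implementing CREATE SCHEMA IF NOT EXISTS semantics.
try {
    metastore.createDatabase(database);
}
catch (SchemaAlreadyExistsException e) {
    // Another client created the schema concurrently; treat as success.
}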
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class GlueHiveMetastore, method renameColumn.
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
{
    Table oldTable = getExistingTable(databaseName, tableName);
    if (oldTable.getPartitionColumns().stream().anyMatch(c -> c.getName().equals(oldColumnName))) {
        throw new TrinoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
    }
    ImmutableList.Builder<Column> newDataColumns = ImmutableList.builder();
    for (Column column : oldTable.getDataColumns()) {
        if (column.getName().equals(oldColumnName)) {
            newDataColumns.add(new Column(newColumnName, column.getType(), column.getComment()));
        }
        else {
            newDataColumns.add(column);
        }
    }
    Table newTable = Table.builder(oldTable)
            .setDataColumns(newDataColumns.build())
            .build();
    replaceTable(databaseName, tableName, newTable, null);
}
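The method never mutates the existing Table: it rebuilds the data-column list with the renamed entry and swaps in the whole new table definition via replaceTable. The same rebuild can be written as a stream; a sketch of an equivalent helper, where the name rewriteColumn is hypothetical:

// Hypothetical helper equivalent to the rename loop above.
private static List<Column> rewriteColumn(List<Column> columns, String oldName, String newName)
{
    return columns.stream()
            .map(column -> column.getName().equals(oldName)
                    ? new Column(newName, column.getType(), column.getComment())
                    : column)
            .collect(toImmutableList());
}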
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class ProtoUtils, method fromProto.
public static Table fromProto(alluxio.grpc.table.TableInfo table)
{
    if (!table.hasLayout()) {
        throw new TrinoException(NOT_SUPPORTED, "Unsupported table metadata, missing layout: " + table.getTableName());
    }
    Layout layout = table.getLayout();
    if (!alluxio.table.ProtoUtils.isHiveLayout(layout)) {
        throw new TrinoException(NOT_SUPPORTED, "Unsupported table layout: " + layout + " for table: " + table.getTableName());
    }
    try {
        PartitionInfo partitionInfo = alluxio.table.ProtoUtils.toHiveLayout(layout);
        // Compute the data columns: schema columns that are not partition columns
        Set<String> partitionColumns = table.getPartitionColsList().stream()
                .map(FieldSchema::getName)
                .collect(toImmutableSet());
        List<FieldSchema> dataColumns = table.getSchema().getColsList().stream()
                .filter(f -> !partitionColumns.contains(f.getName()))
                .collect(toImmutableList());
        Map<String, String> tableParameters = table.getParametersMap();
        Table.Builder builder = Table.builder()
                .setDatabaseName(table.getDbName())
                .setTableName(table.getTableName())
                .setOwner(Optional.ofNullable(table.getOwner()))
                .setTableType(table.getType().toString())
                .setDataColumns(dataColumns.stream().map(ProtoUtils::fromProto).collect(toImmutableList()))
                .setPartitionColumns(table.getPartitionColsList().stream().map(ProtoUtils::fromProto).collect(toImmutableList()))
                .setParameters(tableParameters)
                .setViewOriginalText(Optional.empty())
                .setViewExpandedText(Optional.empty());
        alluxio.grpc.table.layout.hive.Storage storage = partitionInfo.getStorage();
        builder.getStorageBuilder()
                .setSkewed(storage.getSkewed())
                .setStorageFormat(fromProto(storage.getStorageFormat()))
                .setLocation(storage.getLocation())
                .setBucketProperty(storage.hasBucketProperty() ? fromProto(tableParameters, storage.getBucketProperty()) : Optional.empty())
                .setSerdeParameters(storage.getStorageFormat().getSerdelibParametersMap());
        return builder.build();
    }
    catch (InvalidProtocolBufferException e) {
        throw new IllegalArgumentException("Failed to extract PartitionInfo from TableInfo", e);
    }
}
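Note the two-tier validation: a missing or non-Hive layout fails fast with NOT_SUPPORTED before any protobuf decoding is attempted, so only genuinely corrupt layouts reach the InvalidProtocolBufferException path. A sketch of a batch conversion written against that contract; the tableInfos collection and the logger API are assumptions for illustration:

// Hypothetical batch conversion; skips tables Trino cannot represent.
List<Table> tables = new ArrayList<>();
for (alluxio.grpc.table.TableInfo info : tableInfos) {
    try {
        tables.add(ProtoUtils.fromProto(info));
    }
    catch (TrinoException e) {
        // NOT_SUPPORTED layouts are skipped rather than failing the whole listing.
        log.warn("Skipping table %s: %s", info.getTableName(), e.getMessage());
    }
}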
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class FileHiveMetastore, method addPartitions.
@Override
public synchronized void addPartitions(String databaseName, String tableName, List<PartitionWithStatistics> partitions)
{
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(partitions, "partitions is null");
    Table table = getRequiredTable(databaseName, tableName);
    TableType tableType = TableType.valueOf(table.getTableType());
    checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE).contains(tableType), "Invalid table type: %s", tableType);
    try {
        // First pass: validate every partition and serialize its metadata before writing anything
        Map<Path, byte[]> schemaFiles = new LinkedHashMap<>();
        for (PartitionWithStatistics partitionWithStatistics : partitions) {
            Partition partition = partitionWithStatistics.getPartition();
            verifiedPartition(table, partition);
            Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());
            Path schemaPath = getSchemaPath(PARTITION, partitionMetadataDirectory);
            if (metadataFileSystem.exists(schemaPath)) {
                throw new TrinoException(HIVE_METASTORE_ERROR, "Partition already exists");
            }
            byte[] schemaJson = partitionCodec.toJsonBytes(new PartitionMetadata(table, partitionWithStatistics));
            schemaFiles.put(schemaPath, schemaJson);
        }
        // Second pass: write the schema files, rolling back already-created files on failure
        Set<Path> createdFiles = new LinkedHashSet<>();
        try {
            for (Entry<Path, byte[]> entry : schemaFiles.entrySet()) {
                try (OutputStream outputStream = metadataFileSystem.create(entry.getKey())) {
                    createdFiles.add(entry.getKey());
                    outputStream.write(entry.getValue());
                }
                catch (IOException e) {
                    throw new TrinoException(HIVE_METASTORE_ERROR, "Could not write partition schema", e);
                }
            }
        }
        catch (Throwable e) {
            for (Path createdFile : createdFiles) {
                try {
                    metadataFileSystem.delete(createdFile, false);
                }
                catch (IOException ignored) {
                }
            }
            throw e;
        }
    }
    catch (IOException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
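The method is structured as prepare-then-commit: all partitions are validated and serialized into schemaFiles before any file is written, and a best-effort rollback deletes whatever was already created if a later write fails. The same pattern distilled in isolation; the writeAll name and the Hadoop FileSystem parameter are assumptions for illustration:

// Hypothetical distillation of the write-with-rollback pattern above.
static void writeAll(FileSystem fs, Map<Path, byte[]> files) throws IOException
{
    Set<Path> created = new LinkedHashSet<>();
    try {
        for (Map.Entry<Path, byte[]> entry : files.entrySet()) {
            try (OutputStream out = fs.create(entry.getKey())) {
                created.add(entry.getKey());
                out.write(entry.getValue());
            }
        }
    }
    catch (Throwable t) {
        for (Path path : created) {
            try {
                fs.delete(path, false);
            }
            catch (IOException ignored) {
                // Best-effort cleanup; the original failure is rethrown.
            }
        }
        throw t;
    }
}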
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class FileHiveMetastore, method setTablePrivileges.
private synchronized void setTablePrivileges(HivePrincipal grantee, String databaseName, String tableName, Collection<HivePrivilegeInfo> privileges)
{
    requireNonNull(grantee, "grantee is null");
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(privileges, "privileges is null");
    try {
        Table table = getRequiredTable(databaseName, tableName);
        Path permissionsDirectory = getPermissionsDirectory(table);
        boolean created = metadataFileSystem.mkdirs(permissionsDirectory);
        if (!created && !metadataFileSystem.isDirectory(permissionsDirectory)) {
            throw new TrinoException(HIVE_METASTORE_ERROR, "Could not create permissions directory");
        }
        Path permissionFilePath = getPermissionsPath(permissionsDirectory, grantee);
        List<PermissionMetadata> permissions = privileges.stream()
                .map(hivePrivilegeInfo -> new PermissionMetadata(hivePrivilegeInfo.getHivePrivilege(), hivePrivilegeInfo.isGrantOption(), grantee))
                .collect(toList());
        writeFile("permissions", permissionFilePath, permissionsCodec, permissions, true);
    }
    catch (IOException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
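A mkdirs return value of false is not by itself an error, since the directory may already exist; the code only fails when the path also is not a directory, which makes the call idempotent. A sketch of that ensure-directory check on its own; the helper name ensureDirectory is hypothetical:

// Hypothetical helper capturing the idempotent mkdirs check above.
static void ensureDirectory(FileSystem fs, Path dir) throws IOException
{
    if (!fs.mkdirs(dir) && !fs.isDirectory(dir)) {
        throw new IOException("Could not create directory: " + dir);
    }
}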