Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
From the class GlueHiveMetastore, method setTableOwner:
@Override
public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) {
    // TODO Add role support https://github.com/trinodb/trino/issues/5706
    if (principal.getType() != USER) {
        throw new TrinoException(NOT_SUPPORTED, "Setting table owner type as a role is not supported");
    }
    try {
        Table table = getExistingTable(databaseName, tableName);
        TableInput newTableInput = GlueInputConverter.convertTable(table);
        newTableInput.setOwner(principal.getName());
        stats.getUpdateTable().call(() -> glueClient.updateTable(new UpdateTableRequest()
                .withCatalogId(catalogId)
                .withDatabaseName(databaseName)
                .withTableInput(newTableInput)));
    } catch (EntityNotFoundException e) {
        throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
    } catch (AmazonServiceException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
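The method translates the AWS SDK's EntityNotFoundException into Trino's connector-level TableNotFoundException, so callers only ever deal with SPI exceptions. A minimal sketch of the calling side, assuming only trino-spi on the classpath; the SimpleMetastore interface is a hypothetical stand-in, not a Trino class:

import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.TableNotFoundException;

public class SetOwnerExample {
    // Hypothetical stand-in for a metastore that follows the contract above
    interface SimpleMetastore {
        void setTableOwner(String databaseName, String tableName, String owner);
    }

    static boolean trySetOwner(SimpleMetastore metastore, SchemaTableName name, String owner) {
        try {
            metastore.setTableOwner(name.getSchemaName(), name.getTableName(), owner);
            return true;
        } catch (TableNotFoundException e) {
            // The exception carries the SchemaTableName, so callers know exactly which table was missing
            return false;
        }
    }
}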
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
From the class FileHiveMetastore, method alterTable:
private void alterTable(String databaseName, String tableName, Function<TableMetadata, TableMetadata> alterFunction) {
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    Path tableMetadataDirectory = getTableMetadataDirectory(databaseName, tableName);
    TableMetadata oldTableSchema = readSchemaFile(TABLE, tableMetadataDirectory, tableCodec)
            .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
    checkVersion(oldTableSchema.getWriterVersion());
    TableMetadata newTableSchema = alterFunction.apply(oldTableSchema);
    if (oldTableSchema == newTableSchema) {
        return;
    }
    writeSchemaFile(TABLE, tableMetadataDirectory, tableCodec, newTableSchema, true);
}
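Because alterTable centralizes the read-check-write cycle, other operations reduce to a pure function over TableMetadata. A hedged sketch of one such caller, reusing only the withParameters and getParameters accessors visible in the next snippet; the "comment" parameter key and the helper name are hypothetical:

private void setTableComment(String databaseName, String tableName, String comment) {
    alterTable(databaseName, tableName, oldSchema -> {
        // Copy the parameters and change one entry; note that alterTable skips the
        // write only when the function returns the same instance, so returning a
        // new object here always persists the change
        Map<String, String> parameters = new HashMap<>(oldSchema.getParameters());
        parameters.put("comment", comment); // hypothetical parameter key
        return oldSchema.withParameters(currentVersion, parameters);
    });
}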
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
From the class FileHiveMetastore, method updateTableStatistics:
@Override
public synchronized void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function<PartitionStatistics, PartitionStatistics> update) {
    PartitionStatistics originalStatistics = getTableStatistics(databaseName, tableName);
    PartitionStatistics updatedStatistics = update.apply(originalStatistics);
    Path tableMetadataDirectory = getTableMetadataDirectory(databaseName, tableName);
    TableMetadata tableMetadata = readSchemaFile(TABLE, tableMetadataDirectory, tableCodec)
            .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
    checkVersion(tableMetadata.getWriterVersion());
    TableMetadata updatedMetadata = tableMetadata
            .withParameters(currentVersion, updateStatisticsParameters(tableMetadata.getParameters(), updatedStatistics.getBasicStatistics()))
            .withColumnStatistics(currentVersion, updatedStatistics.getColumnStatistics());
    writeSchemaFile(TABLE, tableMetadataDirectory, tableCodec, updatedMetadata, true);
}
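The update argument lets callers describe the new statistics relative to what is currently on disk. A sketch of a plausible caller, assuming PartitionStatistics is the simple value class its accessors above suggest, with a (basicStatistics, columnStatistics) constructor; recomputedBasicStatistics is a hypothetical variable:

metastore.updateTableStatistics(databaseName, tableName, transaction,
        currentStatistics -> new PartitionStatistics(
                recomputedBasicStatistics,                  // e.g. a freshly computed row count
                currentStatistics.getColumnStatistics()));  // leave column statistics untouched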
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
From the class AbstractMetastoreTableOperations, method getRefreshedLocation:
@Override
protected final String getRefreshedLocation() {
    Table table = getTable();
    if (isPrestoView(table) && isHiveOrPrestoView(table)) {
        // this is a Hive view, hence not a table
        throw new TableNotFoundException(getSchemaTableName());
    }
    if (!isIcebergTable(table)) {
        throw new UnknownTableTypeException(getSchemaTableName());
    }
    String metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
    if (metadataLocation == null) {
        throw new TrinoException(ICEBERG_INVALID_METADATA, format("Table is missing [%s] property: %s", METADATA_LOCATION_PROP, getSchemaTableName()));
    }
    return metadataLocation;
}
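The three checks produce three distinct failure modes: a view is reported as a missing table, a non-Iceberg table as an unknown table type, and an Iceberg table without a metadata pointer as corrupt metadata. A hedged sketch of the same classification over raw table parameters; the literal keys "table_type" and "metadata_location" are assumptions about what the constants above resolve to:

import java.util.Map;

static String resolveMetadataLocation(Map<String, String> parameters) {
    // Iceberg marks its tables and stores the metadata pointer in the Hive table parameters
    if (!"iceberg".equalsIgnoreCase(parameters.get("table_type"))) {
        throw new IllegalStateException("not an Iceberg table");
    }
    String metadataLocation = parameters.get("metadata_location");
    if (metadataLocation == null) {
        throw new IllegalStateException("Iceberg table is missing its metadata pointer");
    }
    return metadataLocation;
}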
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
From the class HiveMetastoreTableOperations, method commitToExistingTable:
@Override
protected void commitToExistingTable(TableMetadata base, TableMetadata metadata) {
    String newMetadataLocation = writeNewMetadata(metadata, version + 1);
    HiveIdentity identity = new HiveIdentity(session.getIdentity());
    long lockId = thriftMetastore.acquireTableExclusiveLock(identity, new AcidTransactionOwner(session.getUser()), session.getQueryId(), database, tableName);
    try {
        Table table;
        try {
            Table currentTable = fromMetastoreApiTable(thriftMetastore.getTable(identity, database, tableName)
                    .orElseThrow(() -> new TableNotFoundException(getSchemaTableName())));
            checkState(currentMetadataLocation != null, "No current metadata location for existing table");
            // Optimistic concurrency check: fail if another writer moved the metadata pointer since this transaction loaded it
            String metadataLocation = currentTable.getParameters().get(METADATA_LOCATION_PROP);
            if (!currentMetadataLocation.equals(metadataLocation)) {
                throw new CommitFailedException("Metadata location [%s] is not same as table metadata location [%s] for %s", currentMetadataLocation, metadataLocation, getSchemaTableName());
            }
            table = Table.builder(currentTable)
                    .setDataColumns(toHiveColumns(metadata.schema().columns()))
                    .withStorage(storage -> storage.setLocation(metadata.location()))
                    .setParameter(METADATA_LOCATION_PROP, newMetadataLocation)
                    .setParameter(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation)
                    .build();
        } catch (RuntimeException e) {
            // The commit will not happen, so clean up the metadata file written above
            try {
                io().deleteFile(newMetadataLocation);
            } catch (RuntimeException ex) {
                e.addSuppressed(ex);
            }
            throw new TrinoException(ICEBERG_COMMIT_ERROR, format("Failed to commit to table %s.%s", database, tableName), e);
        }
        // todo privileges should not be replaced for an alter
        PrincipalPrivileges privileges = table.getOwner().map(MetastoreUtil::buildInitialPrivilegeSet).orElse(NO_PRIVILEGES);
        metastore.replaceTable(database, tableName, table, privileges);
    } finally {
        thriftMetastore.releaseTableLock(identity, lockId);
    }
    shouldRefresh = true;
}
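Stripped of the Hive specifics, the commit is a lock-protected compare-and-swap on the metadata location pointer: write the new metadata file first, take the lock, verify the pointer still equals the base the transaction started from, then swap. A minimal sketch with hypothetical types:

interface MetadataPointer {
    String read();
    void write(String newLocation);
}

static void commitPointer(MetadataPointer pointer, Object tableLock, String expectedLocation, String newLocation) {
    synchronized (tableLock) { // stands in for the metastore's exclusive table lock
        if (!expectedLocation.equals(pointer.read())) {
            // Another writer committed first; the caller must retry from a fresh base
            throw new IllegalStateException("concurrent commit detected");
        }
        pointer.write(newLocation); // atomic swap of the metadata location
    }
}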